[Binary artifact: tar archive of var/home/core/zuul-output/ containing logs/kubelet.log.gz (gzip-compressed kubelet log); compressed binary contents not representable as text and omitted.]
.*CbN0Pp<JLr4ٹly.y*`M<';ձfoHM&Zv'냓ւ X\6$vq"yh]1(ؑ1tE2жfH;:w܁V>ŀFgʵ-ۄa"HJdss}u+8m4d^{dx#5J6;,Nu _G ;[x@N!J!wN &=s&HNo-uE 2d%LLy]'Q{6<芽/loJu h}J(dD0_ TP ǎBFT ;o0]Ksr+J*#ޞTY @ҕ,ْ8N忧AHQCr(,$̠߇~oEr} Y u6ׅk"cl#{S㧎X5‘nڃlɲ9GWWGn#1< 1&!rS`T v#娳%BFax#z zU6_W_Ys%QPɤըjIgL5dV1pN,XJ)CP#~ ,ubxۢo[ ỴqQK_v6%7s(vO `aT˜wVQ08 G0n/N'M{UGsܟ?٨~/ :ɒ̉m.|UI\Bȃow Z(Jڅu,`#)NP-Q{Aޛ85Y.K(y|h}_S<-]+Ffy fv܎:}0G[U\sEVd^ymXt;> cc8Z/Td3xW fP% NJk yQ{o--?6f.u)JnLOSY/7Crm dav^߭qj&suVɄ26O2'&Tl0󓑫C#盽P5o7j~:L<. ^gC7-۰zAZL[#={_萒e_}쾏#kk"Qg.]r-aH_֊3WQt&WXQ3\BK5qg;g=?]4Ho$Dn̚s_ 0>[цctcx} YGtttDhGT B6xA4q-K1&áhRz?x HXƐ-=p7R$ujHSB*9GHGݮ[ʷXtuOUը+&T]Yۊ:٬t(/݋-CI,D9qn PEdS3Hd=~[yY;~]0/]_..L6he}ERȨQJjg[1\jKbKE؉Ehkydhދa^F -1{N4Fmr20%P]_d,|ty\F{_a=7gmg>8jD-dֻxφ'vb !a)bP9*RRP)9H9*>0Kޟ!-[gYKnFi?N&cIӈe2Mm*al~8҄r[|>Cgl!_V*4Y *lKsdh"C6Tbb2d/( Gf?8nX~Mޞ_ý>}->ɓգg< IGMZ#[!4ُFV ׳J뷴s]6Z٩٭ %I%VͷU"jpj8Z@Z6=caOrOE`hO%IOaEvO͆&>\@`9tev2s2)e ̕֏j? $|2(87%N~r{s04\NN`o#]Tv m_O'Sܬ{&q}M ˇb:1v6ԑAa-Kzz`>;:e R#+Q BKIɲ2Jۜ\d O%T3׬v%!r7u ;g-w!mrQЮ!Ϯ2lr[ 9cgmnJֶP5[T0Y{K&6UF޿jKG$> hR8$!҂rCGMJ#xHBk$cdlp268 N'cdlp268 NVPP2UwAk&#{ {7ߊң{,kW(D/.PQɮ̿J|slw1ږW|mfoQ3ewyc6rݗ莺~9kjбۤ#_(#׵wPy_ Gw g\(13K^B ^Պ"l#mSjB:+hwj+r ũ,9!#feF>Q`DT*w!' PN֘~kX! S]|uDim_{m9ąCB)F ҚMBT(JG]!o)u!J.Lbbbes4ʉ=.>?|?r'=fɷr}r/M.JcZngeeM{p~i CsZ3Uυz06:Rm#:`?<+LUHPU-'6.\rkfcqQ.O/F5(kAWWGn#: 2V i=g_z[5#uD9J<׀'S[mEc{_e#8W L\*FtTCfɂrJ>5bX:boo@.?ۖ/C`xhrMc9_g=SX cnPj1&*r-5$KU Ce|坢|:|I!KzϵfћgooGT lT|m>M]}}&%;j1yngcZ@ NPgbq)1nqIw,4u,?) Yh]E.s"+YuGM +k b;Y8swh8$.~?Q9r!,ɜVJ>,Ya-oLЊS ZY>A2*gI ζcԖ5ŖGt00PcZϘlkAcԭϞ LI;m l km#G_Evw+@,vp` >mmdg8ݒ,nI[i-5,~"AG=t7#̘$$AU2ҖS`K\g=wgO-%G4w8tY4oS]⻧ LϏL|\u$B|ݙߝ;x#]0HBмI aE K76q?߱xM61E--y-;*+H! //WS5/Kde֒ ŵI:0 P.:.dzG݃^7W80jfחOD֪*V݃=Tc=/˺GKm;(CdupiG0J4>yO[{NVnD*yQ]QK#*tWy^L^Q]gX9bF >yd݇hx4&!KVJgΘfWĕA@Q-686}dRdYJ_2،2fHf5 cBkD0k3rvt(tJ9aP됶|np/?S^y&Ӳs'B2$Uc3!P *,Fs}V>f&"2Q7Ms#sD.aYC`x {ɩ1}; 5FZlw´*cXiqBZ\b&}W1ITBBEU2VU&Si#cd;HK s&胴"9 #K/%>0{i'}C ƺ )f! ZK 8g茣8IqM*&.ݓ9Zc-3rLr)EׅahiЖxqyۺ".rri=-e5-[g]6>1#I]֑+hhbIaH!DJy|X$ U]O&.vL,oP1s@ms@X $"9DzWjz=Vk"=K=x<Vº_,yِbR| ).AKiYdH[ONJ%zي"[e.ȽN:8PZpgX'vo>A;cWu¶tIXȤ I-QDDg! o#D23H" hcmس5~,Z:n>_ny Qe:ܜ`FFCSm1 ֿM\c}w2i{ͳui<`6B[9q{b gk6mzG-˯I_[YUQ Z,׫YK^1hV=&gL2Up^իAjN nӗnޔ7RxQJ(8ʖ j/$^;N<#O-;RAt A;EBg'A L\)`e& \Hd9Pp d  |l๐YB2r蔕D3rW)2/9BZƙJX,EH5q-׎&GwM+0w %WI_NRŜrB:BOR~') -W'Y͟ņ9ܨ.ϴMmgƲ9*tWFe_9<Gʻ'nl4gݱy"ň;x܏AElo<]Bc;mK*rH8`!a4AQrj5 uܔ(֐"g{.xM1( 6L36s !e:3yWFl?d4خ@Ԟzw=jV=y'P윐@JP1N6,ii(Rid!fHVtĐĂ≛HSŜ@Fuuk~Qr]ǡ#"#6g)j  &-ͪ-9)s 'fLf\Xde0vZ)d'lVH@^ETE2!Q C va+rv#yaSKE:Eb[eENم8oH"/NݜF|6+ i,@cSOeLS6 K,]^Ny=vƭÏ~k-K&%o]=v. M&S.TJ[5jz/ &!ˀ}1pE+qpEbۅ7R\)1"u&>pii3w͐tV1!>h4Uil^қ/wy}gan7wEvt6ZgJXH^l"82ZS_wA.]@fDK|Vc2l()6:n5Y-QcPV?ڈ.{ЌfExRmLUwV<9"($$wTniidø濛f{yj͕r$t*F7tM&8!492": {Pޢ'BAP2+@p1Vwٜ$NP9mPHC*sW^7,3WqLS<;Y*ZL&{Mtf:)x+/1Y*YF?;PD5 B0PTQC< jfQphoفil\ %'0Y#Y$`9`,d zzL*U/,e dLcY&"IɤcZIb-Abڃv$aǘ9W@\xq<JF2cs1V'-zbː!jbha=gd!4vE!d6;rA#q㎃! #g;HR *.lt6kZј ?g&0(ˉ H1`DZf@оpWrBZ23ڸTUf J}j$2fD3!]Q=%D:2D{(\P]ru# 2S 2rLc- iήYEDNjjkK3ri*ުvFaPI;j8 QT,Y!V&7WG!ɠ-.s@hclU&IqX  طqL[qcv AEH1lu:U%*\!cb*2%CfCPP`6qcd|R1;xUF Mi2UgvPL@:9.g !./50g JC+$d BvH |Lc%@ʂA E"UqSU5J$Xu޹TEoQX$<@Ac1fG̨ naK$-0&lJ80)ЙVNecX"6VRr VQ'J[e@SΨDC0GQF4`ըᔄ QvJ"z 025IX\Պ!_U0Vr!FBI1UV  ѩ-C^b-jI`UWuDM2X)aڍ#eیYUI56ѠN!'x?eid3졞y*-pdnIKDBKJZ 7AyA3 vXmAԨƂBGą1 P$bDM W$2+Biu#6S@^\@&6X/x›q+*po]f,XN~T} yrx*ƝfE <-`9%^VB0NF+?PsΡN‚.&jHmEú۴z[ ̂92=nbTnBl \*:@QR"d2KExG:&oCUQXÎ\d`:Ո (N6G!cUmQ1(=kFV-5* ΃7@w:PDڤ pd!ƢΨI`&Ybm4 M&Ll -\\tBÑ$0z`2o6/aTaM\;5,zn I<5Qjhvfc\`Qýфφ 3xĀ V:(QR(]&T*d@y!U@%IO p'r5c5X߸UaݲI=rV"bʅs <@bv%I^g<[0S! !-JǢ4xT##>j zqz?A![ `AE Yq2csJ6H0r#SH@jpQP/㢴5byP`ҷkd:"TSEce {H:? 
E}057FPٗ9^HL)6PX?˙@̠:= ںO#0%7aDF,Sk2zJ&`ٶ\⌞TV//h .Z6l!&\Ko&b!] 124+&smFCdN]Xr0Z`4rMA5HDXd3K^m#:ōbGk4@)mgՆ}Xj@_ڕ:4d'O8T t&ؖJǵԱߛ4v{-k~EOI d<Q\t@G^[T+XIJ R@"%)H DJ R@"%)H DJ R@"%)H DJ R@"%)H DJ R@"%)H DJ R@"%з())`OG s3ܣWJH *3@"%)H DJ R@"%)H DJ R@"%)H DJ R@"%)H DJ R@"%)H DJ R@"%)H *OH ښ2JfOE Ԭu+;5R}Jϻ޹zQwnߛiO} Z͘"tQ˜۱`=e{]R^#ouKuNGj]&B ˓f7IMcߖޔE)-ݩ1JoQ<d[V^6= _VɇK7r#lo~r|W+z``jc}.ݾ :pU;}WE*\R/4mSWցNo:5@9J;ߩԳҳCiw>[SEtJyUPA|}1y'k{t uv"m^mx vmc:pұ%`{Ljy_ųbՌNi-莋=!9dP7mlkIHD$&4I$M"iIHD$&4I$M"iIHD$&4I$M"iIHD$&4I$M"iIHD$&4I$M"iIHD$&42%i7@nzlRؿ.Oޕt2[r/~h`, qPcLXT8@ )!CBLGTPAW4ߧo KUJwNFhq:?XӅ܉l1{/g,U/֎|9A'<o٥w{GrŴϑߦ&Zܝ +ôtV 2U^}Qꁤxn:d@LW0&>ywV'@ՙ mc4,g.@n8 1J hOyn1\>f~ͬDY)x\c NUU@/HXSFȠY,6s:s;$0]qeV)an|׭߿|TOzοyo?fLU{>#:M!kRSBM}pQXM1JvJDUW ^TНZiH?L/:oH?|>z/8ON?|!0I?La~ä&0I?La~ä&0I?La~ä&0I?La~ä&0I?La~ä~a9~z+[zoմm5mח ?@S)ywfti[><&M Z2QZzR`rowXN:u4oPH@h%h:bCCon@i]z5F5R8|wkӣ}^_sۅk87l/'Wn}6^.ƛ% ? #I=Jq>&4&MWge.5P~: ]Ii_GC |/zSْ}r8g9MJBgx];OM71Av\iҶYx>.vK i-6l0Ƃl(=ij`mr-jk{.cB*J)0lHz\6nIh\MVy1-0QŇ5iϙƞx5җg} =Kś\yYkCȟRpWzFz%c/zO_q>M`f"O(FZ:g YfBV08' Y0ٻ6r$W|,&Y| ;f ^ٖ<+Z$%Y,i1b5EUŧŪ6qDk, *GIȍL9πWtX\)rkl79`l)RkSVVkSi)YA9w\d-DPV*;DklGڰL :SBȩ->LB*ȐE "! x"^, IZGema}X6ꇟM? cKǶֈӈF\$WAkJlmV)MaD)^'.ͥUJ"gF|U;ԋCu-"#bW&C89Ykg,HqМAץAPm@ӋЋqǶ̶wf[7sx 4 '!uQq]8!%AR<rRIFA*VU]Y*Bc "pę+ wyÅ i O/85>]cd%Nx0/oX) `Q)L8-uB0!PqQ_^iA_ #1g|~@wn.b.R+Z7>OOǵCL.? c~Ǜ@ikY;rZ" Q $`Vևw (4\- N'9ptJ <+5C Pqӟ+GuH-A(k| >tJb!b4pa(L{MAphaׁﶦښ6%bF/׉I",]^|?@1g4,@x'Hei*p*h%!is$J#c A[k 9 !1!6tׂg1lo?Ӌwmk?z}L9D~}w B]0Rщߨ9?ܹ|2' S.-LI!>Ωiݛ4+vsO惷h+?HqЛe f18{{nŽ7FH9C#] ZA[nq q0yW0b!˯ӅݍٿWLr:*#GNmԖ2({dž5'?>:k{˞SzB{É{)$jT"O>?<9~˻Ow?R?ۏ ٟpuVIZǛ o2)9f7C){_|}- _o4?G>ǽ}է 0cMM|`_AQ̯&)ox*@UL2DAξ= ;ЬSFϹ#2\~^]l,< ?ZLLCV{eM->?%esˉyX_&O2,h!3?3c9v*k576IJY{L (vBߞ pDHYW_a%$Rp!U窃w/ -2[G ^ZКRO  43wI4?}2gu*Dfv+ZW76yf! -Ng|MI=YszWhrw&9\t%ZWwo8sy9&qco:Ӹyֻ_'iI׽޻7Wƫ?z|ʔ$) pӌ:3vOV1W;ᣚ[ J~u|>ѩ[߫F>'7}e(5LO.{K k#л8ë7 !9Ky9\~<齛̉8z?ۢ-I[)akdmM%m#ACjμ OO7p1 hSTH]hrr-^/:GSs91@O*L䙕0kNGafHtRGpOlǁب!B V6*@ӌkf J `RXfAQcJSY0bCx3<Bm+>)G%~`' -5zg#vM<_ h7,zPJ^$tL1K )&N3 H2J{c2ƳdwO|52([x[@ m{ۓ OygόMamHPhܷ1GK|n}n -ot^YwfN>o<_ ŝNT TŕF+"YìU&s+Cv1\\ܒyz 9cqJzZr [F5 eNq Hs9jMdBm*,Z @rmW 9^qWS~z0 eh#!VMZ||f?kR/r Z%C)88嘈 2ȕG4?9ZaJNuٚPƍ_FC4R[V[\.it*$Ex *yRV1rK8p8NmfώVv/׊5k[lIv3dKHIfJXW|jTAYYB:qG#=pHra9f) $.@:tD.#9ێ5D!yM'=F#ML451)srQJHI6!(keaY -Ǧ. {|-Urͮم7\5;"W6X瘩mBν^ |3 u=ܺ^=.`Y5njb [N+n]766yj~_{ӧ:wsބ,]ƈWt.K:7yg9[ǾZl߮egT`muls)=TX\ahRaqEǒ es 䠺TXa*,A*O91k dTחv+MPRr+ Q)Zyx J*+K@T^&9wldtWvOmW7&Kv#GX%,N,hˊ$ В,__l_n;?u(rB-ȹ}R IUڟj%! 
wx% 7/iĝ(J\VV{ZqL)wKNhE۝Y4+{1k!%cKZm}vvW5gb^+  q(UKDtp@-6]6$O(76H2%)Ĩ5r:,hacX7pRǠSy=Y%~ۂ7/)p* nQDܥZZüWJqhB]U"UZ H')#)q+shFܼT2]HRmQZ $H- k ח|.ԂHJ;Ou>tk#Ldm2,D:# ?|N*Iׁ?(PT)BHw$ pw2[n 5^N*TN>?A'c$c9gR}苖:0mYƇA;pO@0=ThV8WT)hBڹ**$TF ^y0nE 2f,?*&ҕb-g]{y#ѲvjOJR0+>6KA%^%e<ٸTu9VNr (Szg;.>QbNAC8sQ&8"% <"L bJ-;8{&gjY!\79.9г8ik\4 umX&|*Wu-HZ_vsўБ ݔks47%\MܔJ8JCMIdOIsݔO n K"Hmi+`*%I Cnc -{ݏ/]9$;(SqqARw4'܃׎J$iH)V8pP;9+4B$7H9<=[;D<v#٧(6ƉQ:Lhq&D x BQx(<z:4!o0Ml%XМ<%B$Qx $VwpgKc8ݳ8z'"aTA%\E&o4PFk'NI^Cg".B #Qew n؜Bx4>h p5< ȴ \?{WF/NX|-2 Y쇽 Y-{%;pݒ,;K䖭2jvUbJʊA\Lfdu2)eJI)K#k>u9}26*+stEhL\76OS}3af|c b њZ375[ZiO:c+Aӆ;&iZi`mT"jcθ m&kDܤciaj Pͺb\0G*}_r[ g;DPT(C 41$*lʐI!&g "V sN׾&oo_'^@>cp4<egd3RY0lV,dP*Bot ʳJW90f/慸-ǽƵtk?>9ksz,Ih h ů$Ӈ9~jٞ?`3z0S'TSPZ^5Qش9= i,Ds>?|kK^˵/_ۅ1 T8[#1s!Qf@Qf,J2" 'M}PoH)Igt<~IڡtmvtAMjZ 9 dLz}}7lUsѰ.U]h@0,']d]JHbN/(]R~30]4iF}.tg6_,.~ ?Oj-iLGyǼ>zM.o6gET}6+77?t dZhqtg}LcBy?-^W.P^|Swt&wΕ=szkň9[zo Y*+]J8*G\sYiDFZNLN.֜%8G2ڦ(hi}r]y5 Wf%2AzÌ_B3&`rL$_Tsf>DCUVoQ n͞oUr[؎BƷr\E'*IY60Edp`d)l՜dY RZKIL/K11@"I>fo D &hC1 A*={#gT^ާb3m=p.^mQ;֙UiW0h{쟜}]b^7`:cvш9ثd:n\gMz mL?`CD/LhtA[bSP2z}gu+eSP_ #ˍcL5X.:'0J{#gZ5Ef_6r_"XzmmCMei$DpV'e:{-=E{Ћ26{*T3'#F")J0=k2wEÞ9@skȍ6@]DAT!!\wm8:o~3xkNco Nڷ_IW6yg*L^_!_)/DW'S8qw֩R; <~J>gh)5Z; ڸvF<@mOw~O< m_ƫ0ЪDwN^\޶[zwl3 YYI&\lTV{(ǒCh)IIQDx+1ek(: rIL z dfoRK &%pLIVDߛ^o9eeZz$(\):r\qp=#́;HYx\x} GźDl,+5hgY9>[&h-r=Bȱ"FeV2Tъ Da&U3 A2bn&'SLPc&_rm@.k›w+IԼAm,TChu׵8ydT4Pín>mtWw9tfM=a\=mܲᖳPͻ]5|x}m z^h*[j3o&t<.Wޢ7]xU9{YMk-iO~?HJ;6?0Zgi8r'g֩tHJRvQ7@lß.ْ}G}X o5c;B[Ѳ.BH>+%3F;$DJ"JEDѐ(QFIþ7+P5d,XGLN,A1KP7-@U[+7Ə%כ. 0m otlbD%`reQ绲.-ݣ"'\lOc+QKdK&LbV,p"2 w5-[k)j) [oo(I8d1Q{HPȆ2$(P[# M$ž o:=;=GO-#x=?8- l_!V[ 7X i^t:jl2R m20_H$"&m C^j/1A?Qc f-yUJ +ST)Rб 5lĪֆɃ/p(_e,@vƱhMY0&/t9ʈUg JL7khZZh&W2#1>)HD*M N>@):Vΰ>>푌sε=|ӲCUc&ˬ\ :h3O0A]=w9ok "u^e;(q$2$ȍկ \VnHr} ܞa"l/߯zH(jf93]SU]؀! ,{h3t]hɱDG;l 4yGA0bM`HS]S+Ib'%הokpށmz} t{?$QA$(^G j/-"e4yADƠ5f;Q 7:Z,U. 
=$ԣ3sc^Յ(^‰g]tň wy@Z.cK{3B7-M}2r)[x1a}ߡρdu/S=sxaFAxxpO - FnX`vD?axMXxGOgZ5g?Hq>~ģu`p*Btr+r#'Q8 !O10K #Cf=u^ɠ(1D}nīפhq l>}?:jrZv3NYؿ(92 ;  ZჿiX_ia /iD~蝀⺇}YQ 4YYs՛2fTVߓlgo^T/c`(Z}F_"SE;^IӔ7gfmmԺkgc3[Ty'%gݣ><eN`uH"4TsbBT!I+hV4oCXCdĪГR]}mƌ y1icZ`$q/-nY9bddO&d˜s+5?9b[4Fna.%QzAC߫$O7WʓKp%7/if5j .~\vǾ10aZJc΃-&ovmco}2thKKU6ƃaRBH9&]Z$'Ӗ6_SRݤ0Y,9˙5Pr}?ˉRꩍrr_=?bR0zkS>vz|[N#UG9ϫ)ܸ涇fVD<Ta{)f 8GԔh< o ?RBRw?FoCnQv'.ݺI |7nqRr_n?M~J}B9mE~>σ߾{>RVjZ]g!5:Wk\mAMI:)׭rM!o~CDhTS̯5HeHIV)-t*#H$"EBRF4~X|ȨpJ-ZE h-ܢ[/#/W&[(ܢ[pn-ZE `-ܢ[8-ZE h-ܢ[pny h-ܢ[pn-ZE h-ܢ[pn-ZE h-ܢepn-ZE h-ܢ[pnRϵz.Pu!+%BW Y).UVªBXU aU!*U_q) (U%,UVªBXU aU!*U0RŔRĄk %UB'1diw&, +dBmؔv2/g[ ~z"`3T#)DK|esZK .FD IvEap.Bx,_"<ƶH/4:0R4PiKD , čHAh&6F yWVpx(Cd(J\P`HKv&ΊK==ߣi@_9Βf{/d=&kq+H3_=q9Ւ2D{䑫T6*DDK*u0\+C ; 1m OB _ls`SԾw}9ʛ2xdO@\iMI.ъR-B (.>=B=Fvm1Dɉ >z`*!vӊQ0U06EwWLMۚV1%PubR$[ߏ4` W1 zASŐ5u:SA<)Q'K,C3r"zI0F0'm6@䀂0fu2!V,_P٪oS6 UY}_&{xxaF_l(pW ??.dAnJVkAgݲ᪷fܧU{y^m?e|l@M8sa?Sa`VL-@TAaurf 4XŲffPPy̫8ḄgAiQ.c%xOKQ{G%hkL|$7ڼgKVDSHfѲ;^PukP?jGg쁂N rns'69l ՠ5hMF~Tj-8 S4":RϘ:1̩KEXOAM[ytW9dKP?eDct+skhgm:%*x!c ם, ef jQ195^_\B]b 6Tݜ~5ysBMrTUw 7ꗽݼnKb,ˋnj`n3H/@%!Q^l\"fI8QUPF6] _Bz(=@tv SimQ| pg`.Yq]@0Jy 6s_~@Z##k=JbA **XJy'">rA1EhV&Yfc*TG#u wmK e\c{ nѤBR }g/Y|l5H3==Տ:} c!% 3Bs4f&&+\>PjʹQhtjSŚ&VzrĒ' ,6քĴ|@|jվb+ٓUijeTݻ߯3WJnJf-4TRᖽ)+>fzbJ5h=c*u [ bUM\TemM_?YQy33qAr#3t^0bxd`X=rZ;%Bu тjqb\>jK >s72x5sf"Tnd&zdUaa-X[,+N%Jm&f$f}l)0rhyJ {l>/b[eUFUesS/]"%($Mgpa2sBu:mpH`B _)ӊ+&d{˷y`qrd<ńr w_7jl|!^Z|inWuf{vR~>XQPW=JKx\K R LIHI ʪNc_`(3*h2:ڀsD`QԀRErC&z ~g}z< qYt齓կ}nM%6Ema.+Z@%4S=rRȨKfP $&K<@eZ,G|cTD#*'ƱE2OS_NSF4L䏃{v?@=X?~g(Rՙr̮Y7:נ+EU$"TbxKcuPUS.zhM2kAvެ*%xl^Y77yMԙy9ёl*_Sqcvp+rL@xZ(S.yN/71ZP9rf7(1uǚ'] }pmVÜT&:uםpfm¥L>"@lR_ΖnrOowI?i'wR~R^T7zKa2;ęnb2G;QyXp\ g=MEkyy gny5 faAY.וZQW~r`2PE}όoQ |։W@Yf欀X< nSQonJbgkW!ew9qVIYL?i/`̕ڏ]h]ĥS X*E qѵ{l*sFGQ.z6)Bk82YDYBJ)e+Ri3(<5wBvLxsݭ\U` vݨ\Y _ՓϜ˻o3zmb=FGԠH{L,qGhAZ#( Y&yp`g2D'Hk"b`DX bs$#88xʺԍ 9iB}T T-IߕCӣV6wfFڢRmmf[mj(EJnΏxoK~n2)Ǚ(rd6RafK-W[ u9D;ut9DÏ=LMڋrJZG\ )CP,\l(o!`oyC;T+6^餶Lc1>D;NxM%Qvz3uQm/U?wẹg_s5xxv,zF)؎ CV)FǹN%NYX_BAZM9MaeJp "P :^i*%p"ik%lf(\ğ2cGdTa.Xbi3BEG(o"XZ:.)֫jb;*J뽧 ga&@I[gX;qVH]Z Oձ#ұapisHߵh:T2vWlہL&]us>*QX.b9V7aSP{ 浵e$"_jY"M;۞PS xW[3.RTq&M6Oܦ[ѐ͇b;,5*Ĉ1t@Hmoj v;)[>c]JqeBa* f7ݤ}<^b=_ EoW_\Z}ܝeC_͇up_8/S 8 ;k(71qYcI#H7"9fޔM'9j zbn@@C(e&9>R+@R{cw1G=9O.߮e,k-Z=Zt?zቑŶeZbZLbƴɫ6 3b=)$TɂĔ#"ᷠo.dkyj+݉ -kYFZ`U:*b swh-" ^[0h4B3 >qmH|&Sp~CŸˆ\w(4 (l! 
c2XTjɬTNk$uYao߃yӚDE0ы7 ,vy߶́8'LX$.'IIR6,K #X\%;J꓁+c$|(^ \IŴ'W'WI\-N@Z̎>`r.c W/RR+⍝\%q>Rt8vJRj +M83b};ҸST {12}o{^cӔ3z0 }1臌L6o׻ڥl^LkLHؤuPU1_˛f|( Oo_otȧd.~&,CI&/wjˣ_I3¤)0'▱奿蟠sVE[&3oia Re^ZY:M ;SPރ/ M&t;76,sLMN+4vdsty LqˀY,Cj`XLjAF1<,X|hLcMCB@gx0<&tAZ;EQNc!S6碨=_&[>*t^M^*3}yHٺ;*USoY ^uFEbܸ!ΓOt%!$f>B$|:*8 ߓK,Yϣjd*i@6yɫu X Ou4$&= R@2 C_7ŧ~ GvAxVkN~ 79e[ԵGKEfQmF~,sz>>M;%_I%TbRoE3:<\}շaۤ0pmRjt\pſx W=+  :Jғ$-?zJR K+*rNs%OWS.~+f* EfEuȢnaw>^ :xU 㺨̞%g+۷[y7Q+3%u@uiM6e:2:_!Qu"G@%2JLaUjJu) j vo6C u҄</?wq~zL۲SN|ݡcS*<JYG#6pa"Dx1xsYNH⃳ .+R^9 ~y7NA3zv`ج^u~szmUsOPWڥŲZ> T`kiJN%%=2UTH>v2d%c/Q$_8ԝ**lfo}yh_N X\k7G1 bj!{ͭ{)վw_G›cٔZocH~[z >xNQeIKMѹ(@C䕖Ƿ2h2G6 V'^=껵ޣԙ.͞Zr~Iݒnw1FmWrBjZE(M)BMNhJdJRzړp 1l?kÖ)]b^a%D!̥W`fZr l0LBĆdHda}?xv6y'wC׵!!?>Óm wJU(Db\B#M #L,c4>EN :M 2(sr)e*O: 2R;#G7P ';giT`,B`j&<6)kXɖ7]rÚײ܅#OKnӁȅ1 T8S#y.$D:b%oTT"7C|A&|̼R}Qt6K㗤J+A]k<:BT pn^j;jB{Cr؅'du@2eE1;IӊGlFQHL*{&BЫjjڪZj,ҚDNp\+Y' )k!(RmPi0P XEcϨc%R2C&Z R,K=ƥ1z^L3:ɶ݇|18 U}_\5j~49=;Za_OكGYNa8'y!ߺnHڬt0r0,a^ :OV ֧XgA<>=rs0_9zyF]sR{u~;S:az0*i=0M3;rxE-Ŀ;N{Vǿ?闟/˟_o_竷yo&9yV>; mǻZfh]N8uukƽi>`|l| n H7>~y9ǩ1x5:=;ɳU1O-p ttΟfX)Hb.O(=|P^ټ0{NY֩ۨCNt?e '/'NF)v,1t=/\/ ?|W;~z1.^/O~>qٻ㘐w~?.ax_ytJSTM%[@eۚk/px{աaV"7jo!5hb &DHYr>Q~uաq|AM>`)3 [|0X6?wO^ka8<\i-}{vn]51䱾L>5bN 0\TU6b3|iQ;zm))IL &<$+0_3>Q {0~sZANOlO%;== $uIJRAȆ ,\݁M!etKK1q2D|R =LІbh)2YU: v&;~qd|y4]8|;Q0q<ɻX 3]0̸lx* qW~2U\Os5:|RʾGy-Ak+&T9I76˰Oӟ:!&4:ؠ- )(F l$7>.֝d>^) ~8eep CGKga%(N4v9)F3Et{2,ȻWh]`3q,.e9i&tbTE2J"&k'Ͼ:P6r$09+՝jy)`OQ<#(D Ljgl6mQt>o0_4itvq(tNtJTt`N/J5%Z[NN .,s_؆+Ոc4T[qu7ϷFcl3,ۼe Z% e$O$In-D6-x@* $jQQRTi9d`t(줺3vU:iƮw M {|hHӃm=Dq? M89}>9~#$hDA1r)Tcv҄n`ir IbJV#&'W8>koKE$]7J;b J;vڳ͈ymkV"b0mh8R5D>OUi9B$c]H h(:f()de,b&"D*N& cؙ8aW㺚?<>ە~uQiD1J"M(u"Xߍ`ZPvz!gb0вƋ*RL9ĴTƔ8̔ک@?#q33Cuv%;E):_6{*Tʳ&#F8DR b4E Ji=`p1{drvÎ#gc6 [S?~1v܊H@kqx7G|Gs2`Hkt39?;)$=9cl̲JPMBd/u0oUч%MA6C A!$#sgiɿ=uHNmL c {O{59Hntǒ>4zTCGT(5GTlB^v(bT)|֮ۜQ;"Ϳ);e(k[5j׃k+urr@d#GBZ^A<65Z2z}1LFgj͏/ZcCGƳh݁ᕳ{CKu׿b1? A5Q@Hǿeo E`;2н}gݳyksl|AC*RXD $BjV"dT| J0[p(EP 89%FIk E0H`td BWt&MOtiܮ[>:|j,#ɰ2{0y/㝡ov'hv7mS6ΠXncb´'N3B+4ыq˟~∬]xญi/1"{ȠAiQFdzZTqiJu=-=-Þ,{Z s a}NװqI/]aiFmjB ^Bx'I<;==5I=406$TBiDA#)䣳#O:Wڣ!DAm)EF 1"k¤%ǿE#P9@ʠe2 ɾ3q.#ĕ2 F&%dh(k,8O_vLȑrG3bi@J KI(5D!yĝ .%?aS]V}fYfYﺻ7Dn$m2籊/l66w{=דH)xk4غezs8lF2y6e-gvݝ7`[r3?dMgކ*$$Mwt`o^y ,9{Ym3fL}ߟd%ozlH؜Ksl>z?\ZX*ns_0w ,Dh%GPjCeDe=zzgp}N3'-{@Zo"P"b|Bb@N#AAk͵in:_oSbs߫Aukx's7^+{J_2cS `I($ %'b5ye4W̭&9e-]jx|9a.C.TtbdP.c n[zHU`®;~>YWʧK. )$1 ԯyG|I+:b9}s=.' Bed&)O:J1(Q v Մ;4!8Ac_|Zxvgq1p tn~?i?sAD]SyacR W _bHja8\:n9kSw4!\!I@ M|wL@{NP+}?AKޟm4:ݭn_vM3SۻԽ6..sq쭣G!)pxIrWFaSɃthJBNi}Yd$3]]ɋEgGO):J;ӻT[kȩt@ c2 "yƍ":Y%X%֭o<΃L(Em1+8NϝV;%Q(ʤ  c3r0צGd;ɔ | |]&\syLؐ?_wͱ^)5D;F6@6GABk#8Jb3O (kuj ARPX)N:(25s_p94dBKg &jL ^_QjJ'YP} b|Ph-4}~˹'಴4r~@ 6*9 RPpXK3I#ˎ};jjq;cqqqWawT] a~cq6$.-WӼ;)-F_ YrA ˽%Jn+u$ ?qӟU|VvgrÓuV .P:R ! .BI!H-Khi 4 cI3{oq2SK H$NeIr.<#:j49C =:#nKfU Y h>os>iǓsrt<~M2}|n4dןl(%Ѹ6U/g9N8x9zF?(q|Fp~QJXY.Q墢L@ʪi=jJΪum=*ًچzC^P_G\\k$ilw2^zY^ 6 nhs(/],`8`fW23'}~cˮQ} ^mWn1?/Oԫ=q_=W߽iNN#od "78Nʼ~a(B;kG {/#f"WRv`n2NY8Qr(9Éq2:cL!P}C}VCܩ.~׋RRIVH|~v/$'eXm{¬NfjqVWO, ꂏm$ä sLxHjup 8xy]zM9hpiR:0a'0gFApBEJgBi%JG+S./| sbW,2h* ZOx>0KoX)煡>U$pN[ %MutNP@~݅Y;L̝Y [^R{)DDK*:!4 DW4Ppxtw$8d:QI gJFpLUy44'mF+JJD(Q( Z \'&pJtyI8KƜAsE@J˩74 R[e0*h'!deHjF\D- ڠ50F&ց [ j<~*lŠYbf/NWKJ8ˆ<^LZᐳWdŸSL]-רΐ]m^=SӺ+ZmR_y]]zzjq_C&uuKq0LӳiUS b, p4zZ:^K4FZxՑX?e0uu*oyEQ8uE(FËO܌9L7<uTF6:{ȶQ[MͳNu|װfU _Tzg_֕Z:՝ ΐ߼շ˟oy~}޼L{?޽y j? 
LG`~y95^xEWCxAm6]>u3[ˡGnPy*4% ŏo:,R:U*ͶP|Oy1z0kT֞I.REh2 ڱ.7Yy*䚵_~:BWmDbEtg3G/K^677_jm>љ{f45z;gdlZ3gz1+G[*R:aAD (hB`Q9"xT`'ʥwG[޶-pe%>ik&)@[< AXf2~Hj.YL<2CUx/Qї桳KwzG() _b'TEuTK- SsEHkai)ĔgL`sT}C҈}qv鮼xxtB7l|>'eBI 8Fp&QS'BѶ0 ߑ͔Y2(uv{kzl~t}*-b7SpYSl&X ϋbnJὫK}۫oðŦɴ~^}MZg?Qybc}?2rz76㜥,ĚUZLۢ+|o=@z--:IG}yMNFl)g٘a.Mq 7/ܡ{ui2-ݧ^?{׶HdŨd%@cfg163%%] d[vV6%]P.[I1#3'TX"&q;2XвsujYRFa|J))bRFV/]""Pv@O1xSW8-ENKufSqC2-ݿyz,Ne< 8!m֙9\ 3-fAlČ|)݇Y`O2dw7_WD'!tdJo&ÿ$<:jQSqm qv-yT$COu̦L>ۜ Z qfXld< ]c,# -v2&-w'ob/tc؁D ("޺*E#Y;#&k١Ծ>խ 1`'Y J6Ѩ`T%[8T.+gJEDK[4aF3Q{j=2ح(JGnclJJ}lD֛x9K jR6 ?}XM~3W?·b7ce%;/uY~Z#_/VzJć|~[fy#~g}ȜEG??iPCfƽ}}|l.>7],Z;9.> ] K{O~^|ɢG~ANp,H;ןg|翝,E°wmjO?Nx4^~ݛz5R#? }UTmxy3]!gOT ?͝U.YcKױŞj@E SKGLi6-6>k1TTT0eP59xgE$HNF6ERmۥ#RTҪET( &PISOJfO0QEƋsq|xvH_P?|R2yY_'y+6g7J˴!|d(h) R'#{1EIh1(<ʹSR6 IbهEّP%H6["(;q3sv;=ZXw;V8%Ӎ~hǢfJKmiV26,=Yq핖Vx wɀL}ά۫8u_tbиJˁ)<\4gSœ8?/r1nZs\]].YC̲S(RV]$^g'c:VKy]1[ʤH  x(U9(a*2USl ceV&}2A=Ҷo2}\]Еit6tTD^R_d3t5ƗRvQt7_u,kdY0Ft..fotN#zEB'ƴ=ӵv7d+j=0ԩ (kv!-N#z ZIzVsN<_YFΘ(kWH9ť()`P(|&ɅAx-aܣdc⬾Rq\IvfM?{\`N(${i19±A.YJ/I%ZgT<֍y=u<]^M|v&L];G,Qxߎ+7R:>~9bMO>_ (-U4d;e#v> ;HV \Җ'i+$i+2PTi[QHKiC!`Md^DFklP9ҩNʤH*\eB ź*L\&7K#xvˢ;ݡ1uE%&Ё7d.*`6HQ;xTRq,SdQՉ8*Vq΃ϨӾ(OdQ//#bQ@ 0Y%*JRX@Dˌe: b9_%h?) >$lOTxku ퟞ4GlM ԊǷ y@9@6Rf,yR\j*a00`(H;X|P^N #O6=[JEpFc}|iHaH`ͅp#3|yV[[bw5br$G Ꚋ\931Tyi 0{KS."4O๜]LC=,!U`Z"4?XƈZo"d=3FiUÌEgONƅ?+#Ǜz;hEcYϑRǣW( Jt:Kv"vP^{g}L-6qdhQDZGv@nX7و/Ftx9JP QF" Y!k+LJN:&`VammPE/NN(#.:UHHA'W7p3?LSOt$Ɠy͢8->^_nkV jLdHh sO"yg pha[/۟>f?] QRw+s^p/:~{u6IA~3ݏccf7=MJ["%Jl_]c.op/O=qNYRs"m{{.]m%ĺMу,ڑvnQ[\|䞿 kԐ /HmI_~0oI+ןίI6{o1s@l2.[_b2NoM;|pWz7 (T lRN0Upv]f;:6i?]X|1LhwQ Dwq}#㻥*^:"^81)cp)}$)I,(zLi<1V: NR^e2\ jkJX-HKh2_Izd[uG]koG+} &A;X`w bԒ"V)I5a,GtUC,F/K#zFWO~٢B4ߟugUCIJvVuJ\%o9JiWmel#4-١P hgjB%q ݕPIZPIJJM?LM((=j-R/lYs]z~sTt\5̕(jW!rUX˝1W dWUꮛ$%\}P.!s+usĕ;Zu\bFs%`3u )n1;au<ktDZ8RV!~!՟T?Gz^^c\ðDCR#6ϭ93d5! 
I`R"UiOhOVwp7CsOo$c3f<)߿7plRఱOyRBP_K6#G(8r\Rf4WB Zފgj0F-WO 9# ]xj}W55K4޸sQ^* x^* n-݋5@Gߡh ޙhM]$iz&It|p;Z!,(‚BXP aAK^ aA!,(‚BXP aA!,(XBXP aA!,(‚BX<1x (6ڀBkYivV= A;(|wP A;(|wP A;(ET4!U_RO IM6 x.J\ `ס/`f:2`h]ه}Ȅcϸ_h#Dko½_[vrsf;sp=| ?KeLXށxo٫4wn7ݗ~rc e & ¼&BP5; nUTsc%bDTdYuzgj1ݬ&Rŝ)M(Yk@i xA*,"y ap) p SRir*NKcT` oJªq\R:%ԋ Muy犲tIYF$Vkq w:lA0DrMi%|۪Q8aPIlCYM 7]a JCJ$ 6i+pjXB+-Hp$4AqP8UzKB~/Ejp&@ fZd eZ,Oϴ߻Q0un MF{RDU_RLyXx5 A9/aN(EkfF~[elQXzU =GO560A;aRHH`H^{@:UuV$C9qTQܦ QGEdQ0A[`HyܶB]қ9Kgw~hܗh_I@> y\ I|*QK[:NDkb(*I؈uD邺U)]^zu#Y@x))iH^rY-DB8 ƈ'8..(0  ]/X.UCǛ*{N|jϞSON*Z#n 6.( &1C+>=7-t>~ Ӧ(Lj-dlYۈH~8sRFǙI#|htTĈh"c9Phڄby *P$ 1WHL dS(sVAӊ7,:9A 3lBS.frty5sϳX~dI >y!e) 'H EQ>;Ҙ08ƩO386"X,⃵[0Yi㽦$Dio k6fUhV!!Ɓ3u`$zS`IrL$53G] 8+#8 -ᄩKldCHPf(],vdC#4h\R #Y) Ha [a"DhA< {mtlj3{dCy &͖R#2xթWH+ُ/Ȱ":cԙ'm>TJ}~ ?h[1'F{t:h/5w1\7OXn_NY׃4qzOV-K;3y2vL V*F8 4Mfa_8 hxUC06$±4t|bsۉ_nn5Ǧ( ck7{TG~n ɋZ; ׌ak|𥳐z`Ui3mޮ8+Mz0jA5}W)z;5a&O~,~TI_"jվ6Oksiƥ.T_63u:|9 SW(*dT}sG2zB(r­U[`-΂W#)By׵rF8/EҟK^5Lk]&*.JRf)i3(x*E1`r(eJ?sI_zҡoI9n`*#R"TD2'Tfi%1"+Z"8n]}l 4BvKquᯱh|h2\pp,++0hŨ0P D sd7Jp+"yFI@~QB"Tð+LYQzeѠAh`$B#l2Fsi`!dI&%|-ςL pS]f* ߣɧfU$[éjSq0΂&'OzO|kML~p5t'):pdL㳫mMIabl*̥|ދg>C5yD~5lg7܋;ȷǾtzi3X̚yF~3S/߻tp聇L.m[ZݿzsSTy=\3zû7ǯ߾I߽:ߞzur'޾k'Qp`p,/O>4hjڛ4M[iB|%U,fqt nՏן_aq5I9hA4WpBZ5=_% 6ep>J Uh*L(E3堉m ~jfH~zΉiupq4?]ׄZ8Psʇv] ,?}^oW)RhNsĸߩqZ iO 5X jozOGw1~wNO,;/陥s/Vtw}wgg#C{ʃbNZQے )sIIꥍ8cD,;.'Ēx[Lpu>_f]h"F>M7yn0`Z`֌,`'`i $LD[eu(0mLtcpRI1 g9U%oe6%mðƟ5WZqL\ק8͐!c`>J0K\?H`gt OA*A*vҁIO&vzGzZ B9 V%aå O=cF@1gz4€-4x4`mŗgLngٺo U @CէЫFp^"?CAjje}7>zK0M^P$-#챎̍`8>S lgbCl\"f:^/ϳ%hCz!Cq5W {!DrkPϟtLHpFgIxjaՔR[!jm| )dw(x8W<\P0vY4a#A29HsٻF$WHn cvg<ْH7XHG,E t*&#"22>@嚏TȬfޚe ^֡vyl;u%  i-MOv9"Z^82!a/:Zصȃ\yTtI蒥 |ĝF1"%%d͞=R-W>c"`5gR əD\"YZ49tcl?{D8NԻ[A1+*9Wfx7(8O3zKן{rEް C-M*z7)2fR!dDC(%.f#5xc66z{ {/ޔ55_AC>Vf .7+mɿFph/GT1Oxyf8(g[#QnCF "Yì<q^3C8/wq^$8/8B1|t8<"PBi 2 NJ k?2J}!:K"&NMx(S bVhJ!i?6&N̈́1z:h ܃sq$qi{Fh ; E+~ 7W/7*PL&HT9c2riU > }  yCU4*c, Ho"u <-([TIӄ]WW694vuk!m%|O1-'ޯ<#y\F,Uݛ&JBT' "zfI2Ds͜eѧƉ!KI 1eq8$ m Oݔ8Q }G;i whjHAPeW[%p=Dq@Yü"`B l}sO!H)U9iF)y"Rw!)XA $(T-ŷfKn YP$Gb)$3Sc.'#:Y &M^Rʝ4h>.#U&4.q c hv=jD{lq❎HqJCRM3jQ@[sn.xp6rwR_$SAA4EVNBa`VFÄ]:(tv7БLwW{k]@] _X]5[i0°̀K's!ة\f҂le,2̠%BB_XS*K驘,-m7WY$\s^B'cO\,%՛4W5<8+sRN{ob ͝?mqq)GϢmݺ*wԍ|O@{^#/ձ'Bs/sšP24AwM:iCYt*.??cd Q ^dEtƘh O>3@ s8r6uNE@%9-ZSkխ&V|HKHE*'p\9/ef vi|6uxw7F\38op+Wifov;tS:^ڭ^>a󅒚\x,`n[8mw_@7={8~9wq>:ϩV#;7OP8?:}8 EӲ0P5^YJevW48.ܢ}Mu\¸/u%f6'N\'aEs7rF'7 Rf =!J0KLYʅ#.~xC]s9T 1 +߷!bJruzp[(KsN )jXAɸu&t.A5Oos.d6l?1GAZg<. 7zOS9Cŏ}EJkCQF.]Kx@ "SDRxjIFd%^z'r3B6bGcN?-?}]>*F|ɗ)[ SQ2U7.xϚR}Vu ]}y.z9l/=&@4_& &egr8. D-24—bBԅ! 2C|VulOZ'-'fJ$B.fz d )^Y!(%²%sFkIscc IJ6& HJQ>14aZiSӬ."ZbB6xtIs*I]f_}7Bc".i=࣫~fg@L94ϯRBpVU7_ tϪ$|1*gW\\JG$-'9.+Py0FyD7h$mјajVdSc o%u 8DžFD0L)t2b8p#Dp\ Rp3Bhb$'[.!ݖ)]dJ@Ņ-Tj"rRź QCފҦB ܫMSH|C Y=ҴĕwMPCkQP $'5 rϤj@#knnM;V0pEQ2m1-Ncl:0K !8 Q:G, \"׬ٳ T;hk! 
A| )Lǁ$J:g1q m{!Z$(g{q`TYR4F?ѶG#Ji^'YY,Pαwn/߾?~ wY[ ;mCm t#PUg<m)O q@~i (U0^K\cIS8awx=k>gwܠ R!I/nR+!(%DsLP1(brκw,cFr6[9hƎ;6;O-&8_Np}kht+l,M>G>@Wg2{:;4:.ӥnKhBc  x3A:"$74O#ޓz{4y^ IK9 qַ<1by[賫a[L8wPhPov%(Su[E %kAU%#Z{ք ORV+OJݪ\ח7kZ8آU@d݀Ʈ+NOV{05Vj65e70g׊TŴk1m.}%6;H٧nGn\;1*alLˆص^]+A yJ%K/P($TJfik{,]ҷXHP2hZLi5oopu?n&5sFlgyOȕF-RFl8u#n;ݳdi>kzmw>'{*'qu4 3Hm /.R<,rD7h"KhʕTz[ȅ]̎Kqw]PR|h?\ןNX .E6ohBY+I75kЩgSlZ&$THv=9E0GGI!vLY`0:vR]{Κ3vU:󅝍]}.ZTm"Ӈ4lN2:NΟЄ,cGբt"|UDuG,lL7Kug96h'M3* $kdu\c:ZMNzr@_Y{[R."9sWhHKhlWcG [ {)"f ((h+JxFcA }B2\t(TQTBh@3CfdYtRPR,1YEQFTN&&مб?9aKkAv5>vc{{ĵ939Q#c–Zk;d.d(YxRIL1k!Bvcuq6Z/-+_TbBӂSSb)SN5렚ER~q̌:;%;E):_\lR\B*Gy$#F"J0={2;Îܑ9-N1:vܸ,9oE\Dя w{bӨfOoH ,s&FgRM"6]$CQ(OA鶀wg";>t!P| %\qRW)D"+3H#0h| ۅ\j1{_˭G|K1Yluő2H^ 2l@`I 6ǐ+_F.> w"O:e)Igt\Iڡt:t6ɣpl9!dLɛstEZ,gyTzՠu >e|III"VXk?ZW<(Yum; ZuE3>z ?h+IB-6N/_߾ϿR}_߿{y=0ɩ4_|toQjo^BMվ˧uj#}-QRN~~=vWnf, !#l6i$͊xU ExB&k烚mf¬Nfn!'Ɵ3CW˓,ʭ 'nv=׫->;n1_;xp_"*U;9l H6f7 >WcN|Ϸ_}ٳT헝.iY;:w}ό&<|R[6wk*LT}brGAle!;F**빊L& EB2s$s*mg[^OpMk>ߎiB=JӬD&HoxP%Ioz\Odwh'%$1ql;uelպ;|e(k[î]w@WɶGZO5n})|{lbbݠT\B:+{C=z Z:@]*zTG?Y9b6!)r,"SnۨU&)v.&"rTFsJ`pLI4$;9'^q&0F <xOY,4kE֗{7_:qX3ޒK^$$[i9 ,eRGE0CP[ fo&vb3lwNgOFOHV|ˆtā6[(yM=ezmF6"^[p赕z P]{ae1!}nM;S`5l\K 2Q(` hFͮ.&da( 2{ҖGǦ-SI[rA[ؐP mMbt1|t61C7z=bZW jK)J-"k¤%ɢsQr(7@ʠe2 n9fcYsP*2Q>N>LY".`Ch)IIQD0Q)[F ) حPO`JDm!+0{_bX0I,9㦘dU`*It-LYs6[8h4).=[icһL N1MۻW aOfUt,ͧۢ;׏JV7~Q晷[82Y q;Qk,ngE$59$w?ە(̭O%sѧ5/¹fZ0̍ӞZ$wAIA0F3hЃmQ%[Rޥ:fP;"S퉽Ԗ=wBYA,9z! F%P|Q*"BDE:0H*/|W>";9zƋCLV~(͞}pWPp )PA|+@FiH-<( ?Yꚥ`Gtрm".SkCBPjC$2"V@'zE\jGR<dvI=Y$!(%8|5T1 L]:y AS t(LA2f%Ȓs=Xs-8E 7ckq财<` 2s}g\dKbUբg?P2tQRfX~ّԅO T4q1*Y N:Ŭܻ%͕PD(AZ};c^)jr&a6ͯm^m]"6ΐ0v'y[ d4)zqJͅf!E'S [HliVeFqX5m 3.i׺i(EZ 7uٶ)D|TM<^ٽlCcj7@4W[ rVyz+fR˳H&ov_1WQ?3ь{fz0O`Ma\zpPdvaBzM`ǣp4LiOnVC\5R{5!hz~f37SP\"(OP?^!].zWʣOS]$spX}q>3S4Jn,◓0 (/?fK2Ll%N ?gAx>$Ei/m) ~W}&zX1STQbQ+ޟ]8f47u|f>|Fnx`\Z~vdzۃ  ŋJlPf8~(O ᢿ߬f³X}9PQnM B>1)R{V)Y=mdT٥JuBS-Ev!ۇ}N|SZK$}j%~ԋ/5fpwz)+]^|ܫ7#ri3;[S &Zy.ָuV]_ArL {&8?qC2 .']LkS&LR]eqʾeFNecPǯǼMi9Ӥ\<e\1&_D0c ~=6&#'nrQ+R,JX/*DrSWTKϪ%{%v*NƄy ( ;.$Ee)h$!]šMU y9,;P&W%(MTIu~Y7[ě ᢃŋZ'i-|{AqUQ{*j7 {,n2Ov& 'EsƯ^hX'"`-ɓW\ş:A lJT7(V'$'#X=qlU,Aq̤|B*ٓW\AJJ*QtWߢˆ`]3kw5fp׌a13kwY0dp ]3kw5fɓ]3kw5fp ]3kw5fp ]3kw5fp ]3kw5fp ]3kw5fp ]3kw[ i;X21d⅒+˜RY1JcbCA ƙARL2b bƷ O=LUERUOga"! r#c{PuDqKy-S.`k U/8ĬW@|sVk/@1q1VَY5r2nz$[I=j,rJ 9L8C Cl4v*C]Rn\vHC _;giLK3q KnӢ$JђdKk,)QO+LghchUՎ@%ɺW+0CڣqRHH`Hs3Dh;U(Pʍ"6bEw9l1pEbF4vꌜURhT"JH>x{Ђ>t"UGR}8\t 7S/%?%wAQʸ/qб0Yj${lDVu /m3^*,jR=-k%;!E,DDH$ ƈ' 7g--$t"<3kb2?D5l.Rķ0k_e#b Mi*|DžOٛp:cqʏ;P7 $ -L2Xu0FI$ IKնWy657RS/s_?s.]g^+k)L@yM>jb wBsc%bD9AViǬQFYJy>%,t{Fu54ݢpK49EGb(Yi>-^o`S V'%NO'ln>"qJ f,g)4FƐK!!z!!G$+倭ΤA:# Rre2l( \ kq w:jA0DrM ~lU(i0b@rvh:,eǁi_i7cQrR YB+-Hp$tqP8m3~_`Uxt!o#llt{ծ)e{_n!Pf=sZC!a#wD}i0% ![z+'pv=wܪgIp0Q'pK:c k)i3(o*E1`r(*n9PIss. rmfyk1u;"E IEdu0]afXTq8"` RN%7oq : a(=K)AaTy>/TtQ黿uۛWJ0S7LwZEW$wIsA'0l2F1yX02~LMWrK^Vg'ͅ(T@958 6pRu.wt/=uCZ[⬛w!~?4iˌ _==ڟ]p^ C)Ξu"JQ35e0}M\j:e"yZ..Fuo_.5t- ?׍uqwG/|>}w˫^}&G^~o0@춒L;@2w5*N׺˧n|~[>XɾP\1)yZo/v]*µ{OyC_-M K; iJ0 -PggYq*Zlrz|@e_ F 8}akCь;"+~ H RUpiRͻwYX>tW"iGg|ח7p ֐Ww*Hk۷; B7ƭ-Z@R*m $6E塳r︔1Βlm=bM QKSe4e,!ďiM3FÃuH\v;VlNl[G7#kD=t BʁPB5.BLѳMң4GuAҌ%aF*}8V/=ݰS}qNN$L# E.NG/w"ʽz7)Q׼Y`uWiAXCURYRe cAtc`\2)%E2 u46<6 O. ;Q7L | .0\ ][or+>%%A^r EvF_%hKʲS=h‹&5ǀ%ltUWSa*bYHm1N RdFX5lB^@~ Mן&GT|/}Unh\?YELgOEnm}!2txĭx}[ H=gT1}ϳlopqWKz .:e[gc6djL9YM^knxb8sA6\\騃șk^B3/LcKڣwS$;^tP£tŎTr,i!h 9q&xRd y&K/7 r! !8 Qsd]j > 6IVe(eY0N3 ,Ecjx]1d9BZ;low$\8?OMys9S8ΉW%#4\/~5..'7jx,SuX0']#Q75B΅? 
zHMk'IE,E'd9DT @C9k4:qm$ ){FmbPJ&ƲfT^uJ.%%37x6,!b:.{gY絹grVݡI+QYD>'[G9+3"Bgh-,N^*"r͑[[ U5 -gVyPن$3%W19 zRM )䴄f&TVjVt9k[f]7+tAt` ghi37hi}p5y-F\1S~z'UH&CRIKpt^(z*#sm֑[8K#VٓjIGItR,N@e2R-c5qv[zJ5[Xmej me[({[xT[?-`zw y7_ŀƍFO cOkt"-x!!+J@r&ӨCy5)kJUH O*G^&}cP+5Qgv̅MªRfAڝykb8AxVǁV{YXjV`); ^%E'ΡHP*f c0,.ObJAEE-ֆV)j ^M-fUʖ:X{iCE2-v:btEuE7*h>pfPZ%"rR8(AICN?SJy^u u g{4FLM&kZv'BiD 9{:IuH:ޔoy/'>B@m:Oc1]a؝~Kp"f6 AA5koX-EN&˅}m۠i;/4 sh @`c61p'-8 Zu&b "dJ1 蘸Ϥd|V3N1};Өsk:)w^ȥB0.ei>Gk<\E5j̖́ {?M]_HOz{Y/e;]h4{eNf{Z/i3RWVHE\4'RVA{")HYO$(Ȑ CΟ|}W)Kf6)ӈ >^%+c r=>䋞v3OiJ$iޅYف(}10W\21ave-2)QlB"Wqa1Rg'^98&&PЊۧ! h( !Mn-ZvTv/t%`y f>)s *$-f+ 2m{孢Ktܾٴ>և%AX=/^4 ǼP8+?KXec?x?a2ntsB׬lhҍTh9,wG8G:tAWHr CFr}H>XU (ґ~O7he2!2FMpɚ*O(Hrȅj>#ggg0I;t6] ۓm䳻џBb>\-Ƭy'?\Q\J2 2(ϼ\&V[Z&$B:t愁(ԥ UZ.p&Yq6>MN4"BGJMXMx6M%cV|;T6`yf\#˗A f_у"W󠿈ky_IJd;|υD+at6_l@k7<'5 LVz?n :NJ EEJ&##aL+ LzQ%B0?.˝)}+on%\թ/S/wNjUȭ':ܶ}2뵙?IF_F7ۧ9t5f{e4ۯf;!lhnf:xKk#<1Ϛ߫=\NceAq(Ҽ;lH 1 5^ePbȭ%P(Q3'LnkڼJZ5ӑF RBƛp]򘴆&/A1TCIL:z~$α}ơC+v$~,G@̹`ω0zy$se^knɏ?rRpVe2&v֨WFrt^.-A?ƕtE_f2 ޚ)z8_B!p/m#5j_fn~]/h4y$Yr/_~gmui > _aׯl?s|s^Q Lդq}-ivEZ4"}wfGY>|Ƌi=j'aL6>x%02iCOEOMOF#!!G! ʆ Fv,0HgUr#+9;Q5u4gwAz#2&DyK.^~l?}ȧ#W-ssE/,BiS%:g;CKdLP8t\Rp~tY@%n3"K"2HZs]&[&`?D0EO8FG)uc4i=NomOOԐv{!+uŒHhb6=&ZIȊ3 C% ,!v)HO:E(kydDoÐ5i`pe\;t 0+f"*iXSŪ}]C>sf9yjeZA#vne-%xVb*觵^D29&1a}t, zSLf7WKGvY yx9С@DJCJ { Y%^9[ZFe_>hF^뙌VZ%et % .=XZLL!kP&N` {kRzokAg?j8%79Ed!+K5xQSЁ%7-)MOr~cfjr=&y0;_zӻ~,LmV?c7ি⋖׃+7Ms. PK1grr0N+?l:4O% nZiNIWIKɨ|_n”ޞ"EyFq:Ln{;Iu!^o|ۗjiTү0^]YS5??{6d /0Àu2`gdan`V"KzL ߷6eYntBq,b7]էN|qxXE5\TabK--FaI?(^NGIZ:x?k8﷠ ~Z EψUAkY:1H~XM9?{P<^E άkʹA*R,? ba.V4E2T7fSP27 >(űp7GfSB_ ;PQE/U%g=Igy)a қTiΛ~^n\B]"`0ۇ?zM4bg5D. Z\8,g?:.Д[zK/lz~PǟٯCb 5T|77潯92֛JO_o+VKfgӛ)e 6A]Juld+W㯦7фr jy>00c{<] 㽁S@^=?|l,ה:4p]Q3h+-p7kv;cZk4Ѽ&o.$Es'+f:ywv@,6Q->z4HN=0puRxE!8**ʴ!<P*)iH-*NIœ0a!qb9Y-JqTQ&Ew9lVJHH!m܌nACk{(0k\2ѻ7 JtaZ=WG1b*.WZg<7\YsqO2M0"FKF``,Ӭ Vku=_h }"m.DtkE5(y|[Mц@*t@Wy-gWS/u\K^O|˻Ec{L,c05S1# JU+&9NxQ><^<si5tbmg< Be7a.4(w;Ve& [ZK} Vqk6ebvw~q>oQQɯT}Rŝ)S(Yk@i HnSHXG"\JlB U1kQRk |ȳ̮lzXhSTyIL<ӑkbGioludۄw=5``uR[4E`p SRerw/Na1*03AJªqLRۨDf֝ HTͬ6 •Wp C$ԩI#V@b #J >{F2] Mxs$]rWZFKXIhV"b5'B p\`%eu>&Nv݃yq8K6C=91x{S^8,A{(F@%_;@Ї")ޠ,a?+]*nq]O"G%)np0K=H[]F‰uXTR-+rרk@|EUq2$3K0Uj%Yg{'<v8b{ ݩ/7)FiaQ6rKI`~5Q"a\`$:=V)n[Ei 0CJf/h̼I;r&n{~y։1WOo}S[M09YMBn)Y0K92D\신4FO_шx~\}UpS2cmDSXе $3(F07<ҥ)A%&o^0$cx2# %ImBN0+D{CJGt`ZD"rR՝5kv쨵1ɫ1v`7d V \k`37 Oȅ4*XȀ 1+bH|@N@ >9Ҙ0[wnևQ?4ckplFd5"4b7R/%La,` ` _$Dio 8*xB3L*S+ l: v0Eju)A9f&aKcjrug ڂ^khיm ʬE;1Pk!TLVj&uئC0Vب+#mР<^܇^l>6;vՇ-gbhȼOYF |mw[oE?>P~A4%F]}ɂEȖJgBqhoMpyv _A#X`wRc2*z\c瑌pksaY "Q3/yGV~?uxᵬLaң*fJ* S=Bd 42@q/Ph:*#R"TD2'Tfi%1  Ayo$lEn+u!Q8 Pw$ 6_cϥ.Pmɻ(Xr{XVbTXXaQNzm[M]Q0X1K0Jc b5B^a:A+H8X)GX 2Y@KRq 8+c47F#4&η> 0 |<@?T4<)=܌).=8Xa5?~涺 ?,]>ϰI)1:^.CP<3g]]ԗ'|@g^W'^L'G5QEg̗ {'>C1D1~N*S3X n? 5ؗ[bΚ!@b̻0)| #F1Ab>eda&[%hsljY RH-u|x65SOÑK"yZg y`4n9YoP.\~񫣗_^﯏_zw:>w_6$Du.vл[4s5 ͛mӴԳߢ]Fs>rCW [,QeZS=jW!mf,\{E.6Ͽ1?znqm0 -z JM0Vmflf6҇U- ϓDڟ(ޏyWuK`0:Kr;X -Z^iK<1w*DS(rχW1{w`..?ְOW^6KM|^(! 'LsUһM5!*W ߎ5Eڒ )R+*Iꥍ8aQy,;.'Ēm1_J|uA답"@M3yn0`Z`,`'SK.V;oq;ewh&󶄄emȮ{DGUCȨkpU P1L 93ƹ`1X9c)%2@JF/tJ50;VN vTyh5*Cz Y@Sٻ$63~966hI3j8ciFjD NL_nVFKUs`휃V Y1S? 
WbFk7Ku" K \*aqМZ(ڜ-rm| 逍#ΏzQu OZ_qu^Vem[pô뜮N}0:Eh=\Ml*gRr!"sq*P>:(#?_"XX"AF-F2iۆZF h4ԻLJ%T8 yS`X{%n9 Q ƑjpdԬo7yznlhRg$ Y_i|܉#&!ZEpװ cf3hnp9c&Ft 2G-^]5^oH%rڒ\(LT(i]N[)qJ0!Ҕi=u+&F,B0#LF"`"M1h#@ Dp68[~g1`j>iL{@m70h\'ķ.ewiߍ󹹝]Tg||+E56B £^EoXXd%<$wXìT.n>SD1R2-4*0"h@QAsglrD`( j@Cu zz<fq{կ-,O|!Wt a'Z㒑#DE2OdhuD#radfHbi9)r u"Gܠ7"rL# hdr_V8[iHS9_LwTHRVa S띱Vc ^H<6Bj4dQY-#2 k{m4Q>uiﺾ+BP|Mr:QM/bv0:S|uOtlD }mBZ.qmcWۢJC 6j8Sϲ':.cGSI/ng!yENubs2Hm^*\2\XC)AJ;eY ȕ"quwB/+5(z)z wdӁbipd(2-zRLq/TZ DVyj}KN9/~Ln<ӼHc7m4c[DX #jP:/X=*pGQ+,}P˷ KLYzu/@? E/4Tu82TޏYu]O* 쪤Ȃ66\0H9Dn9Q 3hRi ҆մ°\4%YD@^{WNIRk<%pB%u [#[F7UE9$$V8m0ImYՊQ!(pib P ^;}bJ"X=B#*<5|<2+a4q☯)8E{dcc٥x.H)lZFqqsZTQg}+mT(#/@}9x u( p**rXbS"͔p G(o"Xy UDzrubOoӀ`d :i kR+$ЮtA:6 ro -KQJ5P/uˁY*F╡77aQ Ŝϟ:,Tv/r)Sb٦w߆T6W6['4Ɲox|,)xSUoHILRZJԞ7)ƻ=Pg 0fJb٢L V\ކgr ɦgrM(2a&W0cEtyb*,a6ZFNW e%YzGWCWU,qk*U-thI*䪣s+Uʻł*ĞM 4%oi " 0lmxa8hyEp:P@Ys-c18h` U,qVF W1K(iSaTutB1 yM?c[MBʃ¾PGo~}a Ry[Y:M Mfg`\B&4cc`bҹVpb#Vu9ʹVYV|!_YHv H6`VZb'5+ $MJ%ńzKK$ N'>9XCW4tJqJ@W}E%+l:]%Dcۂ*䢣3+"VEtEB'wt[*JyWgIW:nYr>DSg'o!㴇l2͛zxNc#2u4`Jկ7m*: jW hnՎՉww+if}GYS=]vt16+l ]%TZNW ]!] O0\Jh5k:]JFpGWHWuB4#TKRn7aܔ`!ehNpOÎhe)4#!"UK[frtJ( -h8MG"ZCWxW1tw9pVmH|Z &Α$U\\ޚ-V=GR \6&.C-t;tP29ҕ]5n㒘4̎e ϲ4m~~80qW-evlD'TMNM85B6WQZb'\p[B섖4qBɺCbkշ gdLF;x _dH&+ "x~7͸'i>nG_^sݎ}:=o#NSe2 cՆ\#q']/w<xI+}H9Dn9Q 3h؂@SҟW=-5p~bҟ7I\Iu}Zy`G?M2 {Iwތ|ަiB1ԉ=Bf#qu5J\GB`NJU9طEG{Zv&|ԻJi"(ׂyJ@ c wd!"cTKeߚ/黌LT4>]8c(H}zm=a䐤XHM(;Q!( iblSI2?kh*s npѠTvd1{3Vwb*%xt⧷!Y ctt:7ބgWM̮< IH=a0]KY}RѾ>!}}cbY !V(q.*J)2SGVA ƱUWrE؄ A2 Rc*5(K)µ0A8zpVkoL3/TN/X1ˁx@|ŕoZ<`Emv7/ 4~&>L֮l:ڣ [[4m~s1qذqXQ?nq+C/m>ЏG}0/Fp:yc-y-X7^c"`k U c`逰ڐP\!Sz\TknFoOU=LnCtksTazmupہ/݆l0JOa͠]}$ 2^rsۇv~uq84%_-ޟgkbnHW hLm4Ƣ㝙\]{oF*vh!`pNrm&AecmIdg`U,zY$z,fUzl>O Vl3 r>"ϫQwMr=ކfOo#cvpm:fYV%hZJ[Hu$HET#@ygm1[yਥiz˅U?WZev-Z] ]m!-?EroHk-=59 sY04\`D$| l=l8Otˍ' "XX"AF-F2yK-E" _tz&E*L>C{z7}) r^jG,9M<ΓfF.9rEQqR{]4"D9Ұa%Ddjxg 2tF M+q=h4+P`[-T@8Nag"ߘiz #QHYu2LwZ PZFcAFs+@5ɵ͟7A7+b{v{ܭהzY,䭭؜;E%}y8\؍EI ]W]OCZۚȵ2$m}שK5:2࿋I%]}!9.?뻇6t:mrյGZ2fݮG=?4ylL!٪V79y@ۥ.x@:L- ϲmk)L'MM۰Id645~ ZScFXJ²8#\N 7Oݹ!B.R\]֠cJ|NY3rA:]I{'{bi;؞hكq:Plc=m EQEPR)ґJ*OM򙻩[LL|/,VK0T!{ A;_ 1{U9k,"WY&U%Va MK;3φЀm_h(~LuBCNoiu#:Uʝ)j ]+`kۏ*2,d =se_qvMg;6򅎿Ẃm~-RuTG[Vz+ބ(`ߤgaҔ:Y~n=zWTNi LY,yf: *f_<*fKMS%Тm.-f֟N*?Ȱe&}rQa4H!&X7_y]I,0 +_y=s3ejim2g?񯃉ay^[`8bK6ۮ.!#'_iT⁜+˜R:RRix`,(3`2 b힓=C*{Ɠv}\,/zwy!0+&qPAJ- rKBLx`>i.WӸIK Qb54է4FgaltP N94WTp\R_Hz$ $ ގɻ-[Sw{+d%QyXXl*ç֖ "ՆHdE4zO9Ԏ¡V#xo=$"*rSJӊ*SG9q8hj0q9:XSŜ,z;Ba9{> ;~/eL^dO)߸W?q緱LH:pGp;#S);:~~9ٛߌK)ҿo׽|ٿ.0t~3\Ev~s_T3J9O5)&6B`6!~M웛abwzӿN2+݊SO;+Dj\kU tA3>W )'g;o 81gf6>DssM/@)x`tUdm5g9C3=- /)koɏmCYwR̄W0B ;PV~nMOE؊/LU9ipAdS]R͖A*UA'[|cτ%uW-+* /_挌SW+)k gT^\qy? 
{V)M]I><_96N_?߁bldf-Ы竿rh4_30#{&ROgx79[ 7ߞt2fIU LRwp,omplΠߎyZ;*I1Wn .AptՂK$ԁ*5 [!q' 4^IZf5|Ll %cQKPJh)+ X͉B(򧉚|/䠂.f&Nfyfi>5H[4- 9ny50 'xIq6r/J>[%i |QP&R\Rq":TebfəSbWE_8,]6Bs8j:V-5e8 O$MTj,d$ln\]yuZvީxuۛ`&gAq:4F 7fͤ& .ݦg3|]oA.'^R$D횚.y2=C5yA2st̙!>\$!c#ZlOiQBjrc;DKJ+h)0RӐ|\rzvr`b83' c#fHpXN1\*( 9 ݤ,DhDri=zxgOrk 6`ZV#>F8vMͭڶps/pۡ!X%RPEBzot+<- zF.UMUU .;YZre_M:;.F]֑O4Ѣ.#z#HmLS $2X6k is4SkN_TطfoRm*U!3: l,`4"TFS+:/I/DR]2(!ŤR\^ci iPpt.+=Qs MJ++(FD&8ͬJ9`E4 Hn H0kd'hČ}" qN8sV( ¹r|76q\|>K_ɦg?(f^;~|zv~zZقP%9Q.*r[Q #$Ե-eq)~t=EEWuZ=^mcW'{s y4<9T؏k`<zOޝqGiTM%UvI(/|S1|c1zc1J-D,I|0 %?$ k8d_4^H&~l8_)o~O}OO?|z'ӻ߿ᇷf@sy2@< keSESyˢmMz[*o(u}4 )@BE_ގRMOΪU'T uVV#SoE =wF?ovoǓocwd]p^3]8|aDz1]ݍk}f29j75l- 7tb0 de(?+ \2g'kcf\n C9P¢7&m7#Ѿ[DFו.@Ua@͟-t2J˳ A3%4K[3J -2^'ӟl;P/~WqZo(R:l"&Ia*,$mĘhL --,Y"I\~4]gl]ӠPAϧi<}e,ɕcU5ѳM]?g`fhbyn<|bT֗ϣָf^ݸLꚕθ5 q.*kZ4ۺs3|H㖯jJիI_~F b~l&^JAE+QQTx!9˖K.?=[<|T)(r8< d yΤϕ^&n`ʅ9٨(6;n#@42&YÎ8fC,E%r낍ޜ췘 i>A.?I8Ks5qPZo}tv2bX*ɛ uWQß+W\{r* eZs߳WgƲ9*tWFe_9\JA>SL?h+آ$ 5YQ(`/J[/sĵ$]Lj115sQURR2sÌ7`SAY, &hu#JO 8o:Vcpx[o&f*k%)'>J`ef*) ;%GT%fd2y{AhT@Mȴ̪ؐd&YB+&G?l< <>Pe5tYՙ.%19%ѤK"ة=G7<꾇3_L~m+yS'ӥh{w%T3jVɐTsFh+"F xd:`K.ORfihIGY'I,阭ѥX;%Df<Ό*qac/چP\\BT6*!/֏;ONl4*7M>X퍇xh xmIBGPj34$4PvVt7BHs\BTbPFmԙl;flhZC,ht <~2k&fk4ئcO^iz5âCŲsBZk$MV* wf 8ٰ\撠0Q5w,K]FL! YA$<&OEc1ɜ dT'\|6˹a[6%2"6̈3bψIlL:Æ-g J!t9rxo:#NpぼTʶ^kәlQ]ɶNh-m϶^P賭l glk-89/DZD;[皚WVpL\{np9oԚ43d=`x,1#pubM;4]Z&TiD i\[!"Ft \ɺBWfmZK+ӢCtECg+tUЪUtJi+:DWҝWwfͮtE(XOW/AMkc坡ִ~P{ztea\vLɂ"ҴLiT +k]Ү0Bw֮ \]VUA{CW|ˮGJj7JvLW50tJ۲v](:CW ]rd a=]@eqU==2V F-h%Tyi9t >E =M@F&5+,-t+tEhnL(%ҕd!"w `]u骠lێhOW߄0@Κ]+tUЪ%ҕӥ+v \*hmP &{zteHѥU{H;U\ 5^]pToS7~e.P18QC̓8~LNe8t't6 i gՔR%B(zJ׋^pf՗hyxReVt:eEcW1:gi-}L%uTMB]jN O *xӯP؝n0=^\ЧEr^D>\ѯW6j *+Rce*9,G'zM;Y:WzVxfV !|Ϊʺ+άu9ԳFA߳Omu՝gL 9GU!=;- u=`2',+ V^zK,Ksu2k sC&6ѝ;j~kA3[lq[wvq'(N(-t%zڷdCtEu ]Z+e>0ezztŅPuv+tUjv*(-td= bͱGȌElZ;DM ®tA{(P-u ~E$$Q11|k:3⅛ōebիu#{?J$}IU{EVy(缷$7Z'Mdi1h4 \K{quP c \/NrnTMgTZ JO9,wpuC|Vc4\QR*37琥z#sw#*Im]s"6OeOݗ wA̪qZ~TX" "k7{f0)8DI̓@6ρ9S>ҙK{tΡΑ%c9սM&8!492": {Pޢ'BAP2+@p1H<%l5$r!}^&O]ɥzWEՠ\VqL_p),d-&&ܨN  AZF*]{oז{L,Z\Ȅ"kK_IqC!jha=g |MuWD2k+\4H}"a4#2ƇDMJ s6:CJFϙI1 X?#ΈFe$swyf#*3 Th"kI=N*=ڠpjFPFU͒Ϩxt9h* kۍ븲Huu#`C6@APW0M b+1ȿMbi-l^.kuNVæ tF{ju}gtkaM nN'1vR=cGYG7(mm fQUއ6Z:[PR@j%aJcuߺq)hK -F{euZm$B ɨ|W!'c#ZZal%1m/XXeàTGўuTV@N`NڨP/ &THd|"0`)FgfR|WZ}PR6: "(e*9Ǐ^A [ŬΎ]`10h!ƻAoDž[АJhIWo#f B]e &J<|+:28Œb5|1 j3`1<ѫV W nk 9v5+ڱaXD a7HX] @K,brjFQz^:B/DL"|S:!  UIbeҪ*QZ|M4O6"73 7?UeXEcp]cRCDMRuPkV]@3`5!]eќ5MVѠ8Yx`m@ [SW\Dc/XHDIؔM@[/m6ZU)~0ZoK@~` _e?pggo, B4tsYhl)`ظ2}PYPfjѬ*Ǥͨ@5@RMwhUdAh`EHN֔7vLZ #+Bj@SQ ApɁ-zOV Fp8%/C)z @5xp4T6_uK2hw4& kEЬ=P*GR!6.X,vQ@õ1[ U\*3'~b1B`E%蕲`d)zBoͧf{n*E-mMK]RLjRV sw=cېq.q)ba|햞o_=6oym>>b[`{h"횯xE?z%A\~!z|nI?ln/޼&VW_$7 o>l?i{u__'.6?;.[Kpm;ҷK\Zo $NhlpayF'ř@X$ x$W'J-ILYIH@$$$ $I IIH@$$$ $I IIH@$$$ $I IIH@$$$ $I IIH@$$$ $I IIH@$$$ tI Gt0.h͓\I΅rDJ@IH@$$$ $I IIH@$$$ $I IIH@$$$ $I IIH@$$$ $I IIH@$$$ $I IIH@$$$ tI0gJpr$i@@k'yIc(IH@$$$ $I IIH@$$$ $I IIH@$$$ $I IIH@$$$ $I IIH@$$$ $I IIH@$$$ tI ofJh@9h-|( tI 8l4IIH@$$$ $I IIH@$$$ $I IIH@$$$ $I IIH@$$$ $I IIH@$$$ $I IIH@$$' lͫy)_ny}~A]{ow7C 0@qv'PF%.[;Mڹ Dtp}>t? 
3ѕgf\XY~m-|bHU|*MDtp#BW@4F *磟8;c4tpi髫]|ΐDtŀNCW 0):]iJBWgHWƆtf0vb/Yth9ubaUtѳGKqva`<^}?c^lhn^ޯ5r^(6ؼ޽59m7Z_hukr_|ݖa"`zGk4åiT%uiQ(4}4Md]H{G~ڠQj/tut\rÝ<&ka(:C?Ltŀ-ƫ\Wxt4 }.?d.&ZBz{s?^K^+DKG.쫏>\:槾}ZZF^!W_]Aҟo-gOr]?ln|?qjvODSZO߼J-8]KYf!ڒh{N'GtʇMCj֖rKm,|@h}S; i3Kd2--zNv$JҝkL/F'>OmBUμ*zrՐvE x w3@XlĮwNWvCk0tiCN,+'t]tUע+,thvz>]1hΐL DteS+BW@k :]%y'tutŧ݅SMF /-M>9ǮgR 8Σ*n 4~u|f$iU/s Aå4 ]1c; #'$tu>tbTqG q"b4=;FOe|tJSѕOZ/,b:]1ΐBIt< 8yf& K!K~"bMCW 7YLP&%**L <]F]1pQ$tutk4taFyt{w>;q[?'n.K甛@ˏ^>=yYggRNN>ō%Jdg#Ȁ\DcI9ƅHcYt6bqmYhA@SØF&z8q=CR/[mF.HpΙ3V0b}x>=p|fxNMuDs/*_SeZq(7x^BfP~SM76^~A dy7M-]D hiǫ4Gt2u3><{ϿFE^ER+) " Q91Ͻ>ZNG2 Y\λ\ԂG 8>\Wn_#  5*Cz ^ wE%Kʮ"4GF'}Zf~l84K$p] ֹ7Rk/QA3Lwx~tZڇMn% ֗`z~LI"]xa&y?t|@+WI pe?vѸ ?/N`;Xbk4c88Ѕ6^HzfE&^ ր\0V&DnE@#"v3!߇6ߟcI7#@ɞ`fYQU x|lT}4:j9 `LlZ72뢗jp y?Ӓc UM'O1`{ "^-{;toRC0vUd+aH" u_zD&xTN+,{mwX g>b5sqth&,"9ޙ.|i8VJ)߿|߁$p9}7U)չ(xo໭^*M9 =w=~뗫+V`,LdoI?$jOj80:];A4|4<_An<#,GY2p2U6w;p0wDuсS70bQG"XV2"" A #(H8Hwwn|ME#k\Uƕ&3v:k0zNf6;$)A;yٛk駉|K_ ߕKV0+U -- @qǑ HeP B\X D"$Znx;/E0D S!7FgaltP &>HubQOWu*d =!"}*X{))n9 Q ƑCg!o' H3{K Ғ^wYuPVh1y `ˠ )/0>ǢS!><(T6D",Hm{B7Qĥv.O7M~D/s<5ɣYx(%iF+jb ((9u6rQӉIqD筐8F6 G=#u:*Ȅ Λ Ny5P/by&8:(A)vܛ0q0q)fE/h"B ɂ`[% mXEQtg=\_EY9lEr0:;{u 㔰W 묆ޟ~4ޅӡzMNaQa^Ίa }D8,ϢYFgΆUèWVe#I"uu)fRj 1K(" x~]D1:"X#~8~ގuv[Yqlj2&,>E̾~]dgi᳧O?fsVV0`FaI@rR z7Qb N/ 6}Ho/[JE"H-2ktSA_.eכ[fYR sf9iC42@ bs2WՠpWKOb96cך[ZNR /e-)5M^Řg-D3ߧ7$ofCmʜ.I_õq 0)= _&Cotrü,N. , M><~`LLкi[0j|u3ޡY967 Ge+ mF ̧eі j[Nt[:6nt!5H\m4s[unC1KDяCtgN;s3튧itxvC[L}h&z&4>:`+Jp !!`x0euj/@4"nbkr:hb<մZb噶 9qׅ )ϣ "U)tB'R V ㍊؟hI<n^@:o~pxgGw/oSbo TCpeհoKJ[Yr^;X ⚣jA܌'?p H))`w v ;-Q1iAp^*m4U aU8Rn+倭Jt s_W8_,,#Wn\NX"\Y9٪Q0x%90C0 x)wi4/4d!j"0FJK(w^pe 9ZSGq]/Ky?9yzX]Rad0_ay>{8nn ]pܖ!0H9_NRyXpF%wȉR4gKr ,Ӈb;_MW}B[F};6T󠆹2$*b^ [3ԒcqTQ&#"( - lH?]k -Rh C:6dsm6{#զ_#>J3;7}|97Ԏ&).AQʸq1g. s`8:TVca @0R}N!%E,$db 0' c#fHpXN1<|;AB+zz*۲mhC>R|> {qD܀nKSTG X&t@{.2 ~ށI&\hLej41"dn'3|sD X}#XJٵ"fuK߯$i[d^?@`) Kpsn[킎VR& 1A}0A͍QmQ'|z|t^|miٕ[f ΅!0;ws:Q#,x[l{F:~GduX[5vPEN`96*\; OR!a:siz؂io-aYVz4SP: MT6 Ep]'S^3%{fU:zSpM%ZJ0g9 yT1_ugiyÌu;"E GǤ7(fEwZI%pD0ARK&nŴݗApМv}Yv:h| Yc98xjE+F C)rkq;nﶒ*6X1K0J{D B^a-#TŞa#v,JFddTcCa1O!f~wpmCm8~ .%}:U{xRfYc*iGNkeGXQס(w 2Fg Yٳo_aNurg 30 FGG!@?fnɏͻ fbwu;whC&`}T tȻg7;g[.hz L8`")$K#"b-VPXBi 0 ^h)knFOj0O3vʍu yC0qi=&/B>.a!B) p_-vީ/Iz 4 9HZjUY}zv'7[Bҹ _ ENӛ|oX3tȴufdYבY{9[a+/ْ )ҶtB#s$Ft6qmFE`6, 0868\oAWcHE%YXFjvbvH#O!@bU|ɅXOlJ H,+D鸕BWF^d!rM0qmCTDxH+*vjy)?S[fP c?RLch~XOzIIS>yCWf|Q\ХI& rFVa vJWڅaRꕤ6L^K]tY tzaULtܷ..tlO> }:oϳmn|8X{˽aQ\r9W7ͥӯRMw~z~1^|bwGmxր+o~5g ٣a8wiԲV3ɇrh>`%hhUZ^/ͥyJxyTmbɜHU,qVd*s}@u2C )+)mo k2%?R†\R>֤Pr:2LE$t 1h2 *d"{&^\hmغR٧Q9/}|ƣjW,R\91F럦\c}.342O>7P"cM^qn"n2p >sZcOblܯSfIqKz6KeKc[Ea1SŤj?ue UpI\\~R\ b̕$Nh*[c8a7a7Rw+f'vdi?^s9EF 4H3s%NML5AfmcD! 
`UJ:,!C}`F9tJRT"5qv'npK<~gKgVs퇍8 1g5 EDlq@Z,EDqVlec]]fH/,22vO;ƔFI2;(| QLZ$LZ8 J@FN5m?M4՞pqV͕Kg=qqQ 8Ά2"B K+oQ$CW>ΕOP 41p+x[f|O|[npW5n^IrTiQ+*oBA xLZC TƂ/)*Aй Eѡ E Μ 6 9G'dN0sc4ƠQ11Zoh_#"B k,=crYDmr Owȓ3"Lf̶[gG>.Tɧ]H<~Er(29Zٿ7{?ӗW~p%g DzPA{.# ic{3sIÃG4u,BZ$-$E\IIG<S goY[g/yU hNJMIl4R6Pj+:`fbs[Dܕsͳ ׫& QUg[@_1S'RDաԉ%^sLc/i h)`!(8)BAݠI5nG#8͇X,<{֣嘒!C=N *ɣ.E-[C0:\]+ۡvz6E =戠,+1FN!$RDAJD4YkŌKR0#&np` MNyQ@D2܂UI@|JFcFJUIO!myzLĜ0مR3s_ [4&%"3\L[0R(J'Ô09QyKv5u빗>_t#k?.*Ifý Qlc`ΆG\2"EcFICHU+!UХzߔF;-d|'gɽ1Y=vPՌ_~8iiV)L ĹcJŜh2# U$-f+ 2mmEysݛp6M!q>h0u+Yb=oXOY[# +Dɔ\7pU*J:\) pJzW$7pUս!$e zpe]U!կR+gu9]3‹,WGL2o˿~~44M#=+gdP!E Jz% {[IЬO0]F LqsfW՝* 0atIclsfG&WEZWEJ1$zpe$)\uઈkT_HXH \H2F[zsqU^VOhO>-]u}/K8҃jXj9F8SYjz붨z=EQŹcҵz'qqgVlrC!Z\\s@ixFKn-ؕsFcV2t6ߏf< fs׮9=Sj1'O{SC&+a2i3Kffe\N'G1S%Beb69+rHMAQ[Q=L,˜HUf+/%t^'a=$0鸈+y_(voEʕ;Ŷ;N=er6S|OMcHoov 7jߩQ-4#K*M wdїE"hH?f2=;\R/&sTASd_Ooh5q%~lPy`CVw(qhwd3L(ay51(lq3"<7J;5^&nF_ O̕K![q" Q@njHcťzYGjii 5խ<qL^O/URߔE_j%s,O@h`zr޽22%/qhwXQ_1+ڶlwt-Ukѳw\wOi#[zcq십V5u[Hi[%j\3go@Zc_cHiW/$}+X?{WFd !K3R@cwۻh`1_f<iMZ-77x:(tɬjU-[udEVF|źîjq+pU孏)5•E%d+7Ճ̹,{N%X.ܺAl~kc`ldg`+0]W¼=LB6`w iUXUWCWHpU++ E;1; \il;\)eϮ^#\[:WEgO\C 2C!? \IKvʸA}tO`pɶ{JQjO)MI;\-ꐡtV7S\W$pER[W@kLJ˸ \q \iϮp J`vupUD++%R]rI`⃝+%5cqIZpERrի+ 1q.չ䧨*Ŷ~>anky)0Π4jOieCvEJ-z~(mZu ,qWEZzRYI嫄+TZ\Sk񁋴G˞R 5ب'ٯ$߳3[-={·[OuSh|~l6|?|^#kͷ՟Oo8F6j?|>d2QcHz|nui5X'VÉRPzU &#Vj5=hLWrvgf3"Ǝ"ŶhS8qiEIXj?q=.\'9Ne=l[!WաCwUؙIpU4WW^u 5f;WE\]"m"ƒZWaI-{ ⥹"]"l=*R*•ZoĮZ=XcOɭubT`ltVtR_#L+%h4Qx8Hqp馳t/~<9v3:xv9˿[Uԥp_HsD a+Y L:[Wl: Nqߢr;,UQ_o/Oo|i˔w;z+;@dn]z0әrsGg)&e^0BLmQrEH4 23f-PI~A> TN Hz%(YIEJ|2q,.ܪq<;ţm2NJ}eK^F.V;ݻ;Ǝʿ#WXR~4'TdKFNTe4Cu_(?-߶o?GҾN +U|35 V)oeL K.v O^FrF*ƛoeFV%'\|c+, I/h'%W.r?F+wS[ Yb>Z4مJ \֥ȲRdcmZJ8 "b{Cs fGE}c^;] byh]bҢ2߽vv֫XM9VS.^|r*},ZSŋlq.קB6f #79!Vo]+˗Oc}u ꎜNF i~s~(iEiՔVVMPC9eέ8 ^ɍzXX{=,{\1| CJL1 jc[dSso$te&[ K492|R#M[~Oj\Ks8qSmz*rY?ssMƏ(*t{97A^DMAr#U) @]:k$Ocd:0t.3"AVGjܕ^|~rKduPD'VWDS8}\)ZgIvshИG \b\F˵1[ P*牱}JalBGs5ݥgݖmYم]#0\Fi\ZgKV==vZ,m~s ϒT$tJ6Dfq,ɐA;`Lk E.AĈȬϨ6@JXd,+HwʅzM4[pЎeEU+a+Q';axo,/a,NȽj?Ѽo?$ӁO/ʈB˗=L+@Cd{Z#%-tO% V ^ ο@iq8v뾟EKc8/TBMAh(-{Îhlg %0Tx)0'LT`n5(tdr^bʳрS7*9r('GtWy^L?K<v<$wO5I MPE*HӰ2dΘa㙘 dےo>g("GC]z4NF{A-O×[JO5z|ϑVBvCvԖkI[_DR_55k%^S;$7;oԒ7b2M~X6}ARR]>S(QVGFR: IxeLd9YDhD?t[PNy,bz?o/FFT|&4?TsOwK6xKtu{C/V1x p; Z1=i[,;IUz#Pgk̏a=#yl~_ fQ^"3~2m&/gx!shȞ̽Ljy2J>PA4H87\&!s&>Wy8& IJYM&q1*P0NCVqEa۴6M?^b;&aq>M {Ea~<}m&jSsJ2+UdFl~J*ɍ0.]Yu)u-S uF@%czBeѠ쵕e*dVd9DA6֫@AEq1gDmeYb,QjRr)i926%!)b&\ g'ΛoLI[*?wD~KV%1 *,.ofߒIR024p=87mIoLOJ'V"Lcrv.'mPR=G:9ƺ&iJo]IlߠiU3$9K5T yd dYeJ[h5R1{2-hpJl=.E,r5ht@idlL瑱96bm U/kJW=.dl,G/qAFɧ=%++[oC?s!#Fʊ1dІ+Yk(Ξؤ'-pM&ߎ\$!e 6)qGl? 5 )87=j I¼Su,:YvN(kQ10䥒K`trK d" Ex6.xT C&f !Ǡ 1;ȩNƹ1qک/U5ò<66"le=">Y.EcK0%Y^lh-\]zaIu"$r2z3r-3>YBR/mƴPc}ϊD9jW [P-1ߊib  #./-0|"`p Uׁ)Zl.Xd[fPe$&qG796.FlW79&HB ܗY(0-y.s4"#D}~L F eZ3 ﵌ &SHV,j;kgKXevrS`392j޾%BmwBy5;:I@ b>ɐCJOip l`ܵGJPrB}V͋›+.TlQR!͞Yy9}5*Pq]n%FפәOYgEM۳ue:-vb9AZڱyXy|D5LrɴB0rS S⃔8wʲȕ"WDt-zz'}N z,;@6pd(2-zRLq/TZ Z<53?%N9-~a vh$xgލ oǣ_M,1]KgQnV"2 qEeZ^Wꞁ2/NXm". 
n`dD^,%B+Ղݰ.Effx5ZLF93u0+-E> *m,iXP GQx5<p|dzxLOkyaB)ڙRX݆*A@SG9q8hjSaTkyʹ vc8~bqZ%\ NQdBMf/by&8:(A)@;AZ_7(=iҗzݵXu8$Խ]np!w{!k0IBUgnƃTQ-ٔQv)*RY:7r,"@/O,[|SZ˽^rI\ E2ڷвފ% 5VhLy\沄/]Xg\Ewt!_X}<4~>Yu70ʨΟ`K3XCu:;tYwB_ϢY?;no`$Y솞ó,HavN:~A$-oE^< M!now~9l2իO%'6Q͵67 Nq^OIWtҿL2+~nţ޽+Z|DZ B2i<<]dK9{>.ޟ΢rLzwik0**|,}ɔn'=_F7e[Sx(QFIb?.ᵟ S߆, ĞV'eӿ%_g3 Ru¤x7MxѸ?s+on WZ 7[| TR)@j?~SW7EWVۧk}{0 aP8iX=^/l2dw)k`wUNy1MUyWNߍRNQ Mښ؊fݱ2LI[|:bOv,Suc*"FnzYvXX X:quRxE)B6bQ"v0z~;5[p ]hHQrB*ȴ-AA'R V ㍊ԟhIl^=F/@?-0q[͵͍k[h+`] Dj5U &Pޟ@aqs.go*chK'Op/%OX~u|Hp?i5y R!ެ HX_=Apr@* $Vb@ZyU%+rv曫$dci4*IYk^b}HL%=sUPH˚]%))j 4W $c@\f*I댫7WIʥDz9J`: sރ}\%q9;sU)nK4W &_:\NdL8ͮWS90:Ihy :awTt Dkm#GE0"mxNNQ-:9~*[5,u`:,Gjy(0-?Tr8ikҞ=ʶ$`ND3l2x"P{¾O8Uff՘Z8Γjp4WOU&;x˷WH.z(pt*S \q"9$ pLCL4WJ͎pJ B\!L.CL-LPGzp%N،`u8pP WJzԮ$\qM$߻2H28eJfj*A"Ky&cvFv<҈/y:'Ӌhţa5Ex;RGM\Φ b.]L$!XslrFO[je; ;vV#XvQ{dbQA׿eY҇(ӃR'C`>rιx6P-8-oInl9zmNcMñvZ|H~e貣ǡ(}@sS}vM )z^ꗷ^V\} i]w2cIKozy1,ӿRbiaJM:u,緫-g 6 OO˙7Z"=i-ĒF-Z|F! UFܙy|T&㳛 *) !Χ7"/f?iǩ"lU/̃D3eE<)NDᢶ\p-b@ܳWEM~Y5Fl/Pow9O/ZBzhbHכ:fX C۲+"mCK_DVzj磹U :DheҔF{A0.C!B@1O)YjZ5'm,$?yg :OZ#k9Bkܟh& M93nzrb9SA.n8"[]{qrZKИ.Dw%*`&*X_;RV&Xy{&WCj;H%5#whX֖1&Ү:Vv[a]7~fZk[[|Ej"7jjP"Q- λNS@x]Ơ}3$T@Fj$< p<*oc {2@;f 'ofU3rX39U\$Be[µBRiEЁHOY/_b1v\(i,a{4vwn_.+~݂K QZtHʱ AHJFE,Rc'5"2/lOT!]& & q˙yw@CT{ T$m`RhrLNj~''R2?|#yO4ƉP:1c"V;¢UƙY`gD(>:O;3O/>A/9"Ϗ<"O+ȓuk8n4##K= HRERFKA7HQFk'Է&K{$tOE̮8de>6Rڶz~` lI_jijׅnW/wRpa<ΐyʜť(nTT6!rf?}|G߿djvԽw;Tչ78quAS*pbNE'i]?r6BwAORDK'e%UeǛ7q}7#|FrJAS9~ ۠ϫdc2+Bϱq__Vxu ;2Y UԲ F*Wp"WHir5΂ҋeix]ëK?ZoKHTRedv^Qz.K.λ*Kj,VeBL @؊9PW~]Ƴɔo^CEfC,=B"`j.p5n~TQJNx]U'5a/#T[狝;}alo~vBIMKGt8{/g+_uf7Z:u\b ̹kZßanJ^*L%p?* C\oy®F_>!R9_{Mn8Eeϛ}zmq~Rj+޲R'>[5Y'YΒa5-QUhɕ0mXEy" UOds'O[}?Oó\A6Ѡ` ͘:sӭuAפw>9EYu>}:i^"8QQ=dR*sfdѰ7K"fNL_0Id&bەlكrͶ;!i O| JtF|:ݏz^kdkPܲvN|K s$9})]MI_)QIKn%I^u>,<ox{5Fe&.S\=y`r ZLBӸ|#лkb=u^/p=1h\+vWLZ… 1@u{~3ʰLXOx64DO*Nj~m`YƼҾn&F,;?{ %5:u$ ,h.>$/<u^ɠ !h;禓r9%т[^_COAS`TUjz;Sם7_lyMoHJP&Q]p2js`AHN_dAAAyQ~¶vaڻ7o]G&6>#=V.dbѬ@V)\sc`Rs;P1#T$2s8q|fL9HO2?VӋ]#oXQT ]Y4J!Eopnd}w%}=c|9TCBX=cidO= w;Etd,EO(*4׎vvtKI(%qǙsgՎ1eTZ @ ޲E9kŕeF\ٳLIOp Mfggewo1n" Wrl)0k(W'^|y)p^đEVY'xjU@P4xՑsQh%iHAic ӄHtHLXZ{- Hkl* 9CQXIXtVRr½ZP*a)?[G|©e!e8Ph-4}¹'ei%2ڨf .*rqrD§}i&xQ:Ռ7c=Jk թuvSy}߾ʡݦj nQA&'z|eM=Uwئ:/`TEVf80JoБf%^E HF{D>L8&8ӗ@ fK55:/Q%(x$H&rZVȼ"a>X\8 e:?lxGm Edu2~2w1`DhcuRN7XfSĞzX;Po(ѣAk^lfh߇1vx6лV/ d\b(1J"9wǖ(RA"F͆;(QwV_cvq'aeèWv:I}6tO"xaM[+6,lfEj_ #kڃWo/x>L>qrk OcUcA8J0XNsx2ꄰֱW'$+N{T'4hbjyyn/A'uDV[( &ҹNZ<i3fdf'Zb3X'`4oʜʲP-^u*HP oTȈ8g*ZpZ#imBZ Kal)"7w(~mzy:'E{nݓkȈIg8a䨳3:;Ψ3:;Ψ3:;Ψ3:;Ψ3:;Ψ3:;Ψ3:;Ψ3:;Ψ3:;Ψ3:;Ψ3:;Ψ3~uv,pzJ'SlNacI$y1.mrIYNӢjֽ7'Ms%QBw@ʫD vRS \ާkUh#U^,&˷Yɯ~w1$8PyO7v-[k忠lRW_SлVR0]^([z/paͽ ׾t}!ш,*~Zg"ȭ V+T ,Љ F^h`%'I9~|}$y| lϠ ̮4 n̸8k)mYGviH} e2sΝ*YvhcYd0y8x`[Wj/ҕtھ3O P۽R2Dǂ9ǤiO 5OeBZ3Ĩ}}VFfRW-*J]53玜aMHYuvȒ)ҘVj@*s$ϝrUe3]0xfRW[UT@j;z^NO[!iii*Ƃ()=E>!Y%a#oZlx{jx<>LfC?q|~>ϝG˓GuT)¥9Z/;} MnOΧ:géd'MBWqeLoOV_iOcuwg˷^&kqL1'tVo.}.Yvd6tg4_ ms@vrKJ_P3b^nΛyY\fуԋ!O^9=.ox8`!ZxV|Mؖk?>&5d^'4xğ|5?^)H 75_V7V'i-?^|^?^½~/^;D?%!<4ۄy> ֭F{Swiڷ|uߡ]%[>@W7籯'JӴM䧳EO/'z6&,O&2BaIs'0d]h- :ڤ eۨkyaO}ۆ _3-~_&oHwx}LՅ _׭ZC\B-ޖ۾n1}C%@o^ixbNg/h`! 
v+X;pB X/W^.>_8V2w}u̗!ghD/;q{+QDFulջwNla"{kT}Q0BVJօYmWtQI6*W&E1=ko]?z>f+/<~QM[JY5 duA@, 2T8dqhQg龃nt¢O\`w]農Ch$gY":#L-G\zh t <0$ΖcB’+TܾׄfbYgU'N(9yt^ݰB>f/'کǀu9od %|ϓŻm)iS*~M–{+_oHv:)eT!׎$CSM=W5ߦ}l1?.ynF:Rwo;ӢGעH+\ɏwHzw>>Q?T;Wkڊ[7AHO\$D8]x f/2rkU%1AXemC^{ /ןߖTgǹO`\fޖdQKM&>eD:Wٻlz7\cp㾬@bz/xm2l$1u&&Ԃs5)^na[cTذ-clBɚs)b s˵xꃣ.E3!`)[!n{w !̹C*./DuETv\AXwey_$zfPI-N7O_0燂o@̈W VP"@B^pܥtMYݿ>5@g :t*9ScVE#z}- :bli^Hĝu˷\_N狍k_k?Q=t VbՊ>5t>?"ƈ G,yJvhZFfafg }XXpk=&.ַ׎,>-x1/txx_7bGy,r`}"#NKp޵Ƒ#_\dL8w bdkmK:_Ւe[im7 KUYdfd_Y<6چWuv&GUhSHSQs#v9?w$0YDnBDG>2؝<Ŕ쩄fILLٍ:sPJE3\g,4`gыqc1kO fH<*B0 YрXl K퐓 QbΓp9p#1=vvz04!wL.EpՏ_iՏY_-=LT˿OXY#_rԸpTt1KN62ϴ &9ejs }*86mESA-HTCS$B ET^ ~esf5Zk 黭ڛ]3 |>,uo߼f8)~唱D9HҡS(b}s>mg|bDPAS#X^?DRʔ,-y͒S/ Jo"L\FPu5P?YA_k~׏n9t+Za늣Jʒ.ʿ 耵d ,(F"vwT?'py#rPC1\uMPi_q0wBMu@8}Ba枃 o[oo=o6轑;Mnlc^. Bo=R~}pճ:\wyv禷ZzZv憸r7+G]ow~x-{n.G>wzG [׼yK[kvuLoQ%mߝ݉g&ⶵW-S!0q 9[[^ `DakQCףzLI|oqt; nrޠ I&;ScJj"k0X3mq;Pf9TG0Y=3ʢ~_r,ǰMwAlW'l@bX-j)x|[7c0>l"F lϭ=3ex9umKmYw#:pJgq|=*݊4={BCVw(5F>NWҮ46|>j͘w ,Y.=-B\ҋHڡ\?geܡt1'I$lk**5| \EJjؚZJmIjz`xFMtі)אb*H+^[eR4g1*>jiY<☲-;8ujM½2d90,Jɪq7`$7j({OXzJI rÉ*C)5gyqS|SqۻdV1ԛ7+m&(EIzV$Vl`t|WX=lJ1aY82 T9ǘ#g-Z]IG8@GvJE3DIS%D"<8#?cP]6k}ױC/U{12c088s89??_ez;Cճz/\=t3+/o./^ FRn]ؙyQ}K?.g> _R;eWn9 /-oTrv"Dj@̙ eꮴҒtTWKS"XDtܙ+[JIw˸NGJR^wV~ջ?w?mvfWo;tۜwpśz(W|W{m+ny@mtv~R'w0l8=MpfNl?կUR+L5¡]**"=6|8{:xcR: cy=& RڭVKz9x5~k"RL^ `žV7L'|x!?xe#QU]Eu*L% pb'1*BHUhDR4tfͭG 8gRfm]CD%[C* Z-%3 |imL~3C=h,yQ65f!E5݄fbNI5)DnY8V k@]S[ ub =76dZ#o' Lc&G˂ohZ%fQH9XŀO^-XTj`txѱV\+*Z\xɸ8 L3V=X|ՇhJ;C$q܂kpW O%b +,c * )4vԃƒHqsk v% > UFnU{"x70cºo! iD`PgE3=0{F ϋhx"f#aj;FR00kl0FqTM#AI1< 3TY7V rnL.u / XiA{sLEso %Raj,T \!Ԗe ^ߑ h6 ioU(H1Y.=u(@OMkc d0+MJ26DX!5RDǁrvYِ$Qxhmb{ H֢P4y@gJ>qnkm+& Ɂ ŘQʩQE0!!@]y;?ΰ5t?3ʢj1zu= tI@-DX٨eu`u0}KАuJfqKjhZL:|x #. 'uc}BBE yjɠ$iy" c)[ÌL֍e2"HPbhb3(PvNnGUPܱ-ۀ8Ɔv U2YЩ֏oM>W-Ww48mPU˚2w*<}X@YB;SuD!t+"a{qwP P2 lwpkijVSAl53x @& z˃IS9H~l{a̓Ģ;`%cwqXd י w ae* )M6@gM7H!c9:VKciNmY4g&cI!m\+wcꭚ+ *"j,eb俩rVo WqF;(>ͧ2i? ?~i4 'NQu)__^ƣh̟Q1tSO;S˩OonfX6 \ M#ZNӈR iLnw;o=YK?ഄ #Lwvz.b Ioat=$m&+`s>~~OKa6q_%2KMS^TԤE[~ ~2P"`ųf(]aBٌ33"V{ J. ]"]Yc[߹OaM0Vt^Sᔼc訙Cdh}2ωTDf}FT|b$5,h2[),s2 ( LǗy]4"M4=5Cr/ԧOLn8?-sp-R:O "y՛//_sӛ0wݟtpEaUHIbb ԑJU%Le!D:p1\2^{jIRɹ"t9uHNt],w h U4q"%I{&~oV.X+pM6PuQOHwQk:dZ6lžM-{> ٓex3-u'Jm@WնC,3\dDWl Zs+Dҳt(,tutŭeDWX}3BWVSuBHWHW:MFtЕ(Vɮ4ҕtܚ ;++M.th:]!J ] ]a~J9MU>HWֺdBWCWZ:bХ8f13+l+̮t(Y*Ub!%P'QFcZ%šEjō2DQ&ǚpꜯcR $jrRH⴫]>]ѲΟ!JYv՟gWk>TNWl'avtڗ>{6C:vp7+]jۡgr-2+,(φ.6C *tu8tȞ`+k+@:]!J ] ] ICWW\ +w3V:@$L.BBBWt(,tutr*'S2 ]!\r+D+;oB:@ܶݧ+lY>vT.thq]+U~t(mv/4XǜRNm3eDr M#\Isi|4(ECi+6#B.8p ;p JYQ-GgQ 4W|ev䲮:G4̴1JhP?ݱpI g>d-,Y~Ti-tVxF ]= ѺΛZZ|]5SɌܿfݾj#ԲZ⛡4Е)t3)bv&\ v`4] U5t(-/tut%ZfDWh ]\t%{Lϧ+@-tut%d<'e|A˩ʅT]+DT+"#Nl  ]!}}e3:@Ҋj'+C->#ة:͹lnͩp4feC\h*4}4mfDW8'5&Bte ])M3+E>tpU6thm Z:b̚(Mc.(|Ojiꨳ-4()͔h jfJxߥ}<Ǐ@;S(<~\cX,,+9 ].1 ɂgՔB*^(6w_ndB~CE yGߡ`?l aQ͝mA*>lE|.YPä?=] :O޿fs) ?NXzF+z!V۟v>P?1{SYjOEE7ޮ1~HL'pI1%0x?'g+}X}3IyV*P95Q Cmutb&&+[-ӀyOKd.kk`b4a 濦`r4ߝ֮FWUFh/gi؛B9ܿnQtJNnxj8ɡpe~xZ1зfy~l"6dRa[nn,+f֖م ܸ)9|=\]׶^xgJNI ONc|7-}`Ʉ/^p0_xҿNo|`j.MZh! ~||VԜbQUzb%*)OV誘Dg˂3fd-tQT"*&r\4Fpe\+}MA)k/A)gpV+ z /({GJH BIh>l%LjnlvIyX ]bAγs/|[vik:v%zr&o)P)tT'=:֩v5 AeP0268G+w'w. 
kwN!(0"/BҁTn%f|[=7Z Kw\p{|E1y:[ *ې!A|ҁPwpގGUZ s*9t d]ԙV>{l/,W>XAԁVqn*EVKm:!UZYc,ߩ%DHArjj,蛾OM5'(rAs;wiƠ}P~[ɑs\s4@'DȹުZ"D⯦g2-G }ӯiV8\AD[*ȚV* 匃YiCMd8@!;]1P@-l:9ƴs$ɩwuXUm<3E-<,:P38OrOǣѴ]_ t`3m־9+t{37V;GZuȵ{9^NhEܓfiD+0哼lB|FDKzC rQvo}in.!x$%pǢ20^bz!C븡u:db0tKh'4+TBEІEҕI*VQU(N\ ,5 |S5٤5Vb%bU嘔 L H"5;UiY555A7UWKI1ڢ.=)zlƣͨ=pt [&&tAlר6D]xGӦNt~omBy":rQo{jN 5+G J(ݴKYǫNtQqˠ_=ޛMP@I2WXޫӕϕ5jn||>n>nm:s'WDsSׇgm5A7Ϳ)Yx<$2 d̏45~RS.L SbKs+=O)J7][y΅rx4 ?LHR4@{gd #VHuL]Ew\*]޷޷;>$XU'@+U"hVEM]-LeQ򓻩pyqU=dOJ;g^F.T޵,ٿ""tEfvgw AO[, 'o5)ɒm=MYtejɮSӧJ#pK2;_(sL!=pKs&"y'T4kjY@] "bg^^"Pǯ6/ ؅ V3LomfnyK֙Ž+*Rr'޿ l/*ă;S^ UX.pKN!s=^^oeYÖTMe$)9ÖÃ`byFKD 8Bƽ^I cZDoP/$O4)olPs:m0JR(SJp{muh/KAkl9<\͖0|:ln?ņw8yע]2:'RTUV QF,P0/.W%h(9/V_!yԚ)c$p x,Xp˨Bdh.hCS0$}6sv AZOiP'r N#g4JJ I C$Ak)~`OCq"_P|yIJ SH@stp dm2,d:#:P<دʎQdMF'8v!$VCDSrJcu> G'8}YSUؖUqV0NnMR fMOr!r{C! 6|ZG-W"BYpJH2jNYcy~,AI؂%e'J \BF] 8!Y7nR`m`x.!i 3:aApO ;˩4/s*4O$߮eeפ;KGiH_P3D?-]3T7ӡi vMzz&ݸSob(%g3qVuwu~^OΛ< ʏ\Mmuc2ߥ_ן]quu|DU/t^͍1Z\UVP +FQkk4v~g_s7̗Yϟ?Š{?KT$Θ6ŋ533rn"rf{ ',8L/ 1٫^ (6c OXd↫~(Yd6Ώ%';xDɒpqmiӛQʞ;v3"a"T_|R[N.flo~LBOMK"d:pxەG_2SYߟ\+]˳÷Qg!2W̋VOgư\9ws0N_L=cxj'z\u4[fl[d\'0>I)Y Zy")vyy \v!x9#u)kLP,ދ/EtƘh OU_{ @ۦ0'Ja&J42u;j* Nܒq8_6g^ [EIpbM< w4/:f 2yGb skDbt ֖9" Sy5*#18,f͜ Iy@?bI`f BꍶģzKڐŕ9DiS[@ 4Lsw*&A9+/7G FeZ!e)c+x ~"嚵r gixa>WOJeXǸAI0:9N񴹴8aHR@+Us! ,X)xQ oɲ[[9̻>̻h۷XMPV7Lϴs'Sy"ݽ4)O /aR.hi7`Y S8aZ~WYG]#WV?gG R!I/R+!(%DsLPY䚅[i9p6oG&U6%k/ih6^ŸNTA>'>m9`29N_m*¿H'DB<|Tu-eE"/!pҙ"1ɜ-: aǢTʪ۱!Voi/)pmT| D-,BBDFL!ODH<0̀iNպYUMIYF\7 .v< Etx\(lYmeޞXg`x=X3ݒmPv)M䔭~Lzyz\k_L-4YUm[&0·PDPD&2L =Ḕ4g /ʟ_T|Y';y{Vd_pa:?>lr53טGÃȽaFf%_S25˳ KqjŜ}ww^t+d  ~8|9t2}yU4jlWmy9fT sIuu;3ۯUݹsr'?N;ӫ`R.ן;ո<օ⫳ΗF !A3edB,3x*Q0V<%+2O5͜]jqI&w|~zV1woC.VeMƏFaz>Vm S `nd* MO^#3TKUP'/e+]k@Xi a+@%WX9M)3i^$F^)S!|M4FhJ 0\idJLDJ%b1#m#QyY$$iRs]xQĀjb NPbDB0:QYí8_9;7餱KDa 9!S#I"pͼm'lRe@dt]S`p挦\ %ju(6_TFs2a`I)D?&DcrT&kv|\q)c58/M!OU5+u*8dfIOUhmaٮ7X"CeKd޵Ԅk(oDZU #\)EpCq?{gF".n⛁do;slfɎe%${~U-YVbQ˲L3p,I>d)Ex+Ǧu.6 $'< fc+Cu 4 o2MΗɸX+ Y =*GB=,>lw|㬞6?pzzOKUP[W:Tzv I ,1Ii5XK J{é1䓑RI&X F+} vFOMSF\Ǿ|4vHm6@v;NUP$Ј&wZ%+;plXvjj`nJz H>o'E뵫v6'4«B0xMlk2[ܩ rb:c?`elX 4Z:\`_Svtm&2X~ӥsO>Lbϫ/7J/>v}_ǫB& xVrO,3W @B =eG6!'c*l7b_8W2fpq39Sr57-楬.Qt]7T5>}-'AزIM sIquVE[5А]NUtQ4u%Գ۸m=̅4V_O>Fꨃ*~cdT :Uu@>JC=ceqh/C4NՉƮKD_G0L;Ueaxiȹ8 v q.rŹ8w.*]q.>Di3 & &\\\pj;XÂJ{4CqElpr+VصZqe4,!JOOK-xt#X X}TeT9H\9kt|r#E[Yj騦dZsDjX=$z|(!A5ͳk Ïe묜%em!y:0g}tJlPVL#>ܠk~\\Ы!J\ZV~۷/h5U5ndcT"8A'/1US-CH?>Cj/}mU}uݚ`áSҕڬٟlr*C)JyNdUMn 1Jz{WMF`%E6 U7`ZvaF嫱]EJ㪛`nr?,u0EϦZ|\]@*3 6"\\+ru"(dq%AKkHGuRk;H[\ W "#\`1 ftj;X% | W\`lFW75=WLq%* F1uJw\JUڇ+#-f5wE5lpre6b{\J]+ fm_P1P! X omo+9*9P`ӬA1M*Ab)>%U>b͜ս_e WAq}t4D;׷2c&rZ:QA1VJUc ތ} cyN(NjErbo^< ViB}B9^<ϕ o*czL>y3 `l8`:(RIрGcx(~IKQ<*Usj=\RW$)pr+Vdq*Q\ W pE}J X=X-!JWM0Wlpj;X%\ WŜFW,lpErdbJ\ WZk/UFbg+SX=XiO. HJJX vڇcUbUր W$U;lpj;X-+Cĕ3NW$XeB6X}ԪjrH%}{}+cNrp&@<} 1_.[zz"Sq:bQ:P["'ۅ?A}.,b. a.e%I~qM?p'@;]s=I>\\ XjNitE%3bs5b\ Wi"#\`'=p`nj0}]p5@\!ق6'ckdF+VbCĕf+Xf:w\J_!hkazXF8bZ;d_l{Ɉ$t+7);Vkt)* )mf+l2c+Rk7X%C(qe3/s+ӡz!lV3$|zUYfS5Ҹҫ~5ܲJ(X%o7Yh΢J׳dծE[W\ Xr}D_p5@\I]F"Y2\Z}tWCĕe{<`0\\rb\ WV\'xp״* X1*-\ WW9|prWZ !dq*j2R?Xg+Rk;XJk/wgNK, ɍNϷZ |7ei'm.fK RLNz+rU`kYb`HSp5D\9'ךSoAtʁR63݊\b-BS"w;rG4N0rGBvk+3v}AT 6Q,g3Nv! vQ[:(:nnu.jAjʾ7Pp v-z@g& YJ\pj=JWp5D\I Bpł-d+t." eq*, ƺpEVZ H{q*. hLFBcu]J6'6ʜ殴dd++8Z"ږ!h8>Vݮp 7o\Ft1;E IV&Zmr#~lb*#ʿM7}s%_p$rN8c3`17sfEw\ʵMWC•kԖPhYIeOjj$X|E\t"Y-+a_O[= GV; _ &W8@K7@ T‚]02'\?\Vd."mqE*4WĕyHG Xֹ*T7N\ WqAFb:cZ XW}ϊQVӫHک55ߌ~rDe&P0O~y˲R_:4QKUZ\s{׬%Zp(}sulZ.i 7O8==]P.x6?ʲAM>kDId!M+\z2!uENڹmN>KդWݼ޲.^ ܣEnF7%~њW__^uʿZ>ܮZ" ɍ̧eC;g! Vk;^pI ,cy}1 ĘdqSWE6$0NŠ! L(Q%Ƹ>rr }ɿ_>Η$}^־^uG)9 _gL٠:O1}&@-D5%hEV2Z VV/Bd?V\;beBWB! 
Y9{*7{QViM=F_fNګ_#MKy+P!j'k}Js+DǞJ5WX31kBs/55[+Ջ$Rk$kqzikWj Ն|{"cA~{r]{=zknuCCJjMXͯjMq,``ײ16f֧Poj-a76 T%9}L%(ws[=G=nzf5bA1,M»Šң}_^?<DM~R9C=sZ' gI< vƍ=7zmI4uXlYj5x3n͌^I[zv܍o$ZrzpszuEv%Zـ: YF3E#&+1W*{4O%gPR;&h:ށ>+x;|pݗnf|4 +YI}})VX6Tle'_Zu!7n\b/1VvLo)uR[3NېpAaހuH1n19P4/ Z\H ̫dvJ>Z f2EjV.^;hd-abEc/*U HɦALFDR:KV@I2Jr2R"ġ_<(DJEq*="Y4r(]y=H P)82J G&R‘fp!T Q%,ٿqEG^gUf4%v2-uOřj!LM<sYyJ5r" Ȋ>ڐLb)xffERGg Rfz9-(g/ZzXobc밥&DF]̔YD=^H:rD|u];ԡeyvD:ja!!/PNZ6'![*]JQK >=5P'{ Zuv6\`Vz^`7BN-Fk{n(Zu:m ajWj55 ڗݦ@ȉC Skzݨ; tZn˝"w:35 ڗݦ@ȉCSNW&8`7v9q ֪>imc-Wmm|,h_v!'&L51U݌Tr ֪>imc-~6ힽ>65 ڗݦ@ȉCtvk7Mh hͮcWU}v[.)͗,h_v!'L9iWhnz֪>imc-wxmpufA9q`[2lr*Zu:m aYUvNݚnS !ڄ)Vpݔu5W2Qc;YmJb|VA{MhY9CvN %}F-v6mJ35 w!'LN1D>`7nn֪^ttFw5iϙNh @W,8d7ZG6o@!` Sn͂e)rm”qlrnnn:ƠհMgwkwF9q6aʌd'(33pd]a8v C 2YA?M"*k1IL 2Dq±URVZ*u)ztv{¡?.}f>r7A( '2cL dD\uIvsbdK누*"VH6h5Vj;l:\!N<1(`w3PP'Vb8ymQV0r4 RL #f*)( z9x2%;k8_DaǮd=k;A' gP7Eo v|q UȹQթ=U& f[}Xr6 ˙A1 &aF$=O&eV7WƠ8|m-}8Uq9q6a #vRTm@!*Ei٭Yо6BNMθuna;č:ƠV3M|,h rmwm_Yml7-6AS&:%]IvsWxԒT: ]X[_4j9FhS5Όތ6 !O\EF<;֍4[_4QcnVG=r3z3ԺА't뾳WZ7F N1Xx]S>\fڛѦ-:4$?}sՠ;NJsLn"ܒB)ޢHK)ĸ RY)b 'qQhǥvBo!\5C\5F]WR*`E-rJ9T" @c# 5‚.W:Xj)sFpK*%hゃRp5oSBYTs6tνRpP`D-8 @ $m)4J1Sԃj0y^´QA Hb n(8;',9))*1ؼo}=o[ҐyrN{[r#[dDIu"!t)ן7]JLbj(j#C4"AaHǜ"!2uB+'A@S(†}Q2b&"ba {X$PI%7 Ps ^T" fw<,{)ĽkwVO(*rSENβEĘ*I |;i8(A8g x%pYҷS%{KA91lXеF8?Gb џpK)@|lT&D/ ID&P^<|lz+g$o$ő\5Ժ7 iܳLݿ\JYׂ:~#sе(ٵ,I͓4HG%uBE"-@a܂CQ2:MARɟU1cpȤ $zהTihB뫐ti@Bt:R@ q,h&JҲ&:t:DTK4igi`{jm!tM$2oBiZ&RŒhA<: rB-d@8Tȩ$&(dCb8x >OdZ1;g+3+笜/:- ;bVd:WdgkL6slZcuUKWN^@_Y9yYdr~m5.Mjm1X+Ӌz!鮒r2uvF r8t2/[X\N/Yd{Nĺ~͛cUط~ޢPSA0Qwv~~>mh@^b֓gg/pI_VO#?^ՅϑO9ʾ{;߅ړ9d2?uq1YThJ3v/^|}֜F`<4y.)̻R߼ ]vcb9v$_{cm4!Kt_Se`(RgSdW#}G6o8 NO `: bxNpІRĠ^z0T[,bEVt8񴓲‡7-3aH~&~^/9-ץL*J ZX z8 VӅvV ZZbP^y7>Zgg]PN>>}O-]fz4 ]#5+{\ uƏa^=z. kZhDbf*bӫ_U,R0ŋz]fGYi@&>9_/'>*t#x2dcupF2_dB'Rc,b8`|8BO6, >o[8߲L*6 W6IwW;FYy$4{7n"||:XkX0`]BbCg$[l} 2O93q F1ɶvKYcg/YAUsW:{˶ _}\ݫ_W`wˏnYTL/m"xlqXO-G^B ࠘`7յv_~e_>4o *61_?42@[}L_Zk*,@Qt)]7hH^;& OEhK-5uHKw(lV>$C,f} /IbV7tg@k^FIl#c|U0pE`M RZWw.!%_UMҸқZOX5o vu Q\[P&@MUxpn!7`cu{6`Н!pvշfyЪm! c)Dq >f@䚎uin5EC@HWg2ԋw=V6sXF5  (;LR6x, qj8&Hc&c=# {;"UEfJâcqR͑c@P>⸢@H,7I§GbfU贻!!kG?A:ŵma~LI#Mt۱ʨ:XpvyWd[.HtW(cDJIO MQ3UO/8tsOsLpJ ObME=Eϫ f)ԍ *9c~Ƒ||[j9ZY\Cř"L=sE.8jKRNu%)W&[JJB,fqH-7cY8Ͱ$Iʜ= ?gcׇ>^O/Ofl&fzOBK(}h{H] 5vI 8݉Ecw(C"O~2N*C#=P[ ܆QD) z'{I +Ѿʺ =:~i>ϛ ^oNP-A EQK:΃//Tɂ (>vrt` *YshGzmGhŞOF/Q qCJ;9j:Z^~ <ۜ1Pow7P$@E!$gR )rUꒃs& &+̱Bh!C5E7: XW3w륱]tcv*_%_$UH%7?zsRц$dGV+^,]'09_3w/60`&z {>q!fjV&?3J췋 A mjdLOfm?j+9})(uӐ`k*֒6j~!ƵGqUZ uw{ ){1uf"/>vuU>^tn>7J!NcV+ѐyq"H DZ Rw+ұxlWbPT* 靼yTDId5I63 qyCe_ޖ55#" o;~WU*%my%9 -:Zޚᆖ`!et/v {UQDvVGJ/]hsG?L pMhZΫQK -`a8OqK߃Dmo&v뤄%KgF+'zJrDS+I{wE+Puww7No„ s7߹4jw.@K""1n/Ewx/QOFmHRt[hbH[Áv/E'9 LazIkw6 kn)eKdBQwEu/PR(8:*;^;|OwFHpIVmLKKxllOv۶4fխ-,0뗶|bT03k5 J.9B[- W'U18 p1F@ s&t" M3nM%I;j920Ru8룣?0=g&C.- 61 9(1#I 14i`dLBJS୆Đ'RA:(g͍0.K\*2bGxtJ[c<_|I$gf2/?)IwwiZ;+{v!@@# yP˭w?Oz⮮N?&6ys\kWsOb#p;ۚ5a RfHX>KNT)P[hP7eX ]gҀutGGq/#~]LcwxVgK\ksߊE18?2ڨM ۧG5u`Qa)v~ 4]W櫑lܟ׼|K’e69\__*rj8bڪ2bAncf*jֆ11Mwb.>2!GXSΎ5'v}5 UeAKΝX{bm׳vףo/v\em'rj9gѼr@m?i+YCiL7nGz_^ 4!K0gNNJ.7A]L]]IϯNʔ=RO_H&,.nޖ74~{}& )[ݛc.$wv}7x&rEs/Jy7WbEN 7-k?-[9}V&[\zY+N=E`-&s;+zrsoc9#&n9@k u}lfb aQ? DY}q-^H9Gca409eaI`;X%Q<@q!ѿG%to64t˱K"$@ hLKMp}Ld:`GI$aҷ tGiKz[8PX8*HbqMnʎqeKfzKc_+x)dzt}8~C#`ྸ2LJ9Ssjd*O[c̉9pN2V0-98{O-. 
= 7u-wlQF.VXbIe$&3?,}epL)GRuPydsDl#1J2_OGfa:0^AC{@ +V0dXXX}Ez#h-un%opy吃x #O jCф> w\D{B; < ّVx؀.c,e_רּh2쮗qG]]=JGir+NViFӀ X#H[bRS)2"Um 5ިC/B`'-:cJ{0UAqa8OI\Q_ g# Bt1dN+$&I{RH%ȳ6 >M 8[خjVJ7O#u9i RЂZ=ŧoggl=dl?h R7@#sE5ڻeF :c[5.VNΌ4Lze9VXfq ZzecщsQL A[|[Lgf,w Pߕ vyS[ٻ}gw9ϓ _7_Z% ˠ0ܠt0F?!B{[GC|qWHF $>%CLK`@FipְT 7T54?-Y]V_-ZG0RULK/v4#._GO=#g N#h,z3υBUd'YٓV#Sq혐 gQTl )٦=֭}~i;6 7<8Xm9x ͮvn㎇+unY1zE^vΙ(v\'uoG1M;8DDkz.u0k7eGk4խ|iHK?ֻ/֐pȊ .܈\_&t],%-,r[3J 7lESꌬof21x7S~,RH"Rz d^LE,C[ c)b@`Ll$+MIDVQ:o|/PI 2$f^m$]/9>Idr;|c6j3l5H?RJuqA+3#, _:]{mɧ iB9}Vyݏ.ZqF+s©uMc" hY2]?`^9>&0<&cdhs{Y  ,&h) m[` }60']ȝBqg 8*DU2'O Y6a\}ՉǵP{i$ 6er11{6ǝe"W-&ԀvUFi~Ju>x.@, g iM_,Y-=~S)`wHrݓ͗"Y Lp$Eh,,g3{$[m[VK$lf=U7U}" $uxiH #3) !mMkKH)( xHR8mIU+՜~mUSǍYǠ <Mq٣@Ĭj3!c()$O'Mt8i@gZiVDNK(p*!jRRRm7L+B<͝.^ʠYx4)`' 82j!4< јhg4,ϳt3 n;()f„K*{LI' 9I>&Yr2Kf/%if^Tq4:qפR4PȒg痵s&p.\)HI{Y0\PjQgF>Vx~YQ[9%APb0*ҕ%weWOWu'2P0Y,W H_X#NڂӜKK~VwV$-_JvhFƕcSL $쁓ytX)f;ɳWӶHroCSx>#Ŭf1ށMu27ƔcLսz )cNH0|9Qm$ш$)%DG1(lۧ1-ƠcEkp Bb uz1@c'T**/$gB|&nˆ` wqRL $dZbJVQ %KjxmgFZh2KA洶{dXrhs>p|rzd?J^6; N`5zPԧz;0~ ` >#3CP s(3 =AШ6Q{@Yi_ zg㞍Lӝztgw{j8IԪ:Զux.- .|wѺ]r<څwAm@퍻PǶ~Pw ħ6qlm 82w"mZ}>ZxxæP k[n?QWq^A0-wL _M[Z(Zi;5[LQy4U{bѸU5Vפ 5v2}h=bh55k 5G N-V>zQc6ͻ]|Y!w^۳#Vy붫мWw@}!T`G&^V]\dׯ>Zp%k%уsq4(1G\=q*Tj풮Vf5 ή%rKh{30 J{OE585 ]eI"X7Jtv~sQ=-^xck$ʥůnv1W_B[mpzwJmD|'oub#EڷޮR$oT{ jZӼ\N 5VئJ?MCWunZD/{VP@Olՠ&T#>˞Dfk{7ieT!}XwʊY~4^rHi/@Wj[Gtjye`ga^0F@#$0&+%g,` H 3FG@ T~@ b7kZv5c.ba[eSwӕQLTgm$4$t=9[8D[w<~JJa@]Mcf˳M(?Lgyit9E݌b {Mz:/>UlX?WcWOۥv+ }r慠4.Dxpti;'_/lZ0tf%`c .:!\zgl)=KQ*&]7Xy~$\DdJڣϵ67PbFtjE΂5l0V'n}HȿVɔ=(fo/va%h(ex2s_le%2#Y6^: N%ٖsL Min|]|t=qy}tE`ѵ޸.H\߾/i'#tnoGeYKw4ogE+rr頋 .9Wh,^On2_d3^hnFG,:W4Yw%Xx~?]V Qt /gE~ >Y|Z=TĚhz&9V f &ȱK$gLEGY.n2_j9EQ3E@<ړXgn$zIŧ^TA 66ݻ{YEؾ% OiK;!V :AdHD&=z2rYy1(P'SyR"]~ 6IEN&&}Lv,JElN<ڨ}oQ np>Iˤo( -ydl]h{ř#G)}\;AɅuW?R:3ˌ*^Hu8^tV^Y6Z6In<9:T$,Q0}jG*5{* =rrf='^^)ya7,RS\\ϥxzs7q+?pFu7/7ǭ98{Fҷ"<*ښ"NÇ4 ÿ2+V-wSJvOۯJڥ|B@ZbP/Sqge~ˋ/6ZpremRqH4 2X4QW {F6oY ;@l3 ǶUXێm8ٟ?Њ8~AE,Zqp;>sNzk9mׂ_(z=yG\xŖ&^7tl!w~h_'{eEt\ϼ4]ڠџar22[Ϲ6y+xFK$BHD(!e7h4"cVȤN3z;a+5xȂ൱{Is1HBԵa&KCIkL[S2収Q/t/%y>2\nw~_y]Żmz/s&7⛷5,KGXk}<L1㦾F?(_n+6}8quyF+_x򝉻yu w+$eV(/81Di%Y~8nzD}EK\iN9Lj==LU*hgz6 %\3,21}Dɣ? 3\8\z  :f=B|I;/ 9F-5kVV=YUπV_z}m{rN %?"P+pJv7g{,G.֛GXqe]L[ }__ˤPi*i8goιllȚGWKqN>-PK]qxbh\s||\0%S\!gIhH2䕠?D#%*Tr\ԉMFQC U^׀>{KC)93ZMMV^e>|o#)g{78c{>oՆG K9'gDW<~+"yokx'ґzŎ>c}~u_;&FGB q@_Emv3tuRִw'T PCA'g#*a3w$TaƸĬ@,:(u?KwBM{xm6<`҆v*`bb:!(xr4YFIi^k#inYh4`w}_K`=[ 8 W`F'8 9:2В$VpZ%JAmCiKjp q_MzU1;SeFcЊKtIa86i3ta9/e +VqPYi@gZb&,yaJ-+Դ4I'-aZP *ݨiI9  (h2j!4EضB0]!?mf֧[eG2&ZA /U(5i[ TdHE;pjؐw'݁v(ҌvF3Wb"uwo,Dzn<%|J7ބX(i_uE&(MDϤ [~w}| Sl>ۏ}V9 -@W<}0bm_JD{/lm|=W%5nfpuFyi3[K/]eZI%0hs`ZsA YJI*1A۞eZZ (^ą<%SUNATK|)2w(:Gy: J1}FQ8]_g t]+C[noŇIlj\in<~a?u))b3w׌/0ˊӭǸV@2AX'!ir䥫5ȸ8B"[Z"XɪN%6~ϫ{{3hϢ ˜&Zr6h<<9b/M6NmE,ݜXle:qâ.@\ӽ'c?)9jAT }mFF0^7eX8"ݩ"80|J'/Q :3uq.b4[žŝzatg6`pb u;Y9DE?#V밓69NJNp3{cJ '7pސ;v8=k՟< uW%WY#rճY˺pl(_krymwPaXuJ/l ,Xyq$'ѷh[.i>*z-;譶س#߾_Tۖ&hi,}#rlph!Hǚ>cK|ydd! 
G CY{,"`.a)$LiG o9Kv5F%浤dƣLL܈i&*k## 6Ϻ:3WnEcczl dB/P 'SMU7Ol@QI=O!{e*S-WKxag >WOXYilk\p,pޘ{y|$Uw/N~XDO Q7k`9!Є~LW61|O3 ?w&$ܥ~vDakKXLD(@ hf:"XHf ]=qQ*X$"i_㷣C?\Mg0th|2JߋO~SFjWt PgAXuB Ԥ.u`m6ض2 Cb- F쥆!ƗkRjhW_OȀ ,%)PYR6R1q ~)ۗ*J+"MlɕK`j5rL+*\ җTJ$}nrk3Z-ܬBP$)%ڊMSZjPn7L`ZE AK$$UqڢE_B`3Z$w\!Z)r[.3nVJv.;ňH71#x ֌K)HHM[>M,8y'KcD&8Bx`l麧f=ZB*XaUW_VBՍdB+r*C0Gk&"*תըZ|`9JVʛ( Sl„Lf3a*ʟXZvQ`ryCbI z,Н='}H^@ Y:y؈7.CXt0:xf#1j= VK7d{iXrB"\WXYCAqÎ(]a$uMb$&ņM$5{sb2e/CLq%ۥgX*3DZHq6O+"Kn3TL5n<Ǭ*^L0C')߇sdџ@{8yXKo~WL8b3D%`M4Q%N}ZnjSe5*{ EHKIL}[PWK[m ؼrWW.p6P!-T$U-^ 1YJ+Vf{8 oXwDJ"kk^L2par^//8Hz&c1I(fi$5G,}bUA0\6rɲѰ#)rc#= .xEƽ&۷4._"s]"83b*|8IV}X#UNRE$_M&!%R:.qخײ?F`0AԺڍtt|URROI?,^'yNDr4mֱ|nkࣲEQk^yV_RQ>8(LJ+v=\rF#HfQ0Caf9ɉ RDBE] &J Em_5C*ِDϪ8= wjN@rQl(JvWdPH*$eLnlK$ T.X LAU<^֌sKVIqr&lk˙Љ|55[29ڒ32Ţ7ܳ|J'V:fZc;ʨ{lJ(h0Ӛ0֚׋l{m T-|S$Aڔ I /ydbП> r(|J(L% kϟL9' q7g1D5 0X6HzjJk0ACf [V I%@$BHOyב}jy2`/5rCEg H*=> %Ffyg-w3Lɷf02Oڶ̄WP~87$M(GuG%Sd0կGg2f~yZ~4ތx3BmOo];wvY,yC`mTҘf tJ0>{k'A0 GKQ[$861ܛݛ4Xc ehRw#񈓲M'ޤ4&,eO(6mWss67 i^z:a)<=Zr3IcĞBZ=*Bs t5*AnkqΑjHe19rG(/h?blzoZ3QdЃeV,T@e**0Tl1ߛfߺ7f>W&[#s9aq0?>g9 ..װ`d.HWD~P_ ,iu|%¥`t's:D^5=tWWmkNT97ҙ엿ʍwe***(6Cpމn ~Ћ=}^wLX+"?ma/ߙ'w֤Uw}< VKH?l>ߟ@bh;,%Ez.±?믠ob;9tqTNЮx\WF2Z7cYU>øjI]'x69V]n[O\fNKv~r h$JQ>P8Zֽ0nO_dT֥jݰwI߸n )]Oc_ܖjR#Kb;W IrxݾsSm;;*@'Z8[%<M^֊hj6pŊz1*Ӿw?z;6ތ #Zw11ŘTC1`*i[P6Jx ̉)|0/KX=W/kb %shz܏26a*3[6O*5LC KD V 6\0&Oo߼)];tr˶=(p( 5q@㾗_SDTe٠?{׶ɍdE:d0`=x۷Ԟ,$K;KY%zd jUUy"q].7[fg(ZOto'l9ӗ/|hɗ'kggz B/Ff"^Ŧ-xQzkC^ ?JBDŽi_{Wxeeu~Y0O3ME~n׃?}Q1=fF/~^N1V`Gޜ .zyG!^_ѯA:xJ.[(']ghkݩ$%9JK`&\P41^zȎͼB( Zȭ&=>hj S V/ڤ\?L6J 1̹S[ߡpcpA·*o^}p# gis֡e0l-Ȩ-L]KRL0=76^ŗi?}:Z{6qId{>eQ-xľ p]Ӆո+BCk(o)l9 q Ŗu9ѿ;_wZ3MSWc2xLɀdg#JQkߣ--yNbYۤFh4 &hd›O_QqْW;",Q-R>3ʖcDW\ QKRa.r4|n6n (Q:e& ]sbQ|T[Nc.&*"te}. EbQ?o#8倦Gĵ$!Z.Yւgǘ/.Z+9sB@-u48d;W\)(1utsHة @1CJ36%ʮq]Dkq,V[/ ׸E E(:ovK/Xzظ!g߭:ɋ52\8~ۻTjC^%1س~yUݟ{3Rǟ󇫖nto/ 7߿U#]߼QF/@EO/WcyOps@j3ΛtvL's>{GoJ$bܑ(-4z 䭍,P#u-7T7]qԅb4W8amA@ؽ1]gH .ZRoDZD)dO .vkf57DFaxREK|uLt\ixT[5Zum6k*! {mj'irLW[9EDҾN郮uu Ucz *9pBP-7aj*m6䎳pm0jW5' u!paow0zX͊X 0r̆ȼT-R^UB>U @`%C@QBf4AGP̛f&pFmUq g\:ٚ>8 MDftpxocqTlS]-)n4Հߡt '1XCXۍNTLa")2B-i9#jt]#=aFO {vRKXLI1S('Z\^#q{r8!5R+8GB9A E=r*I@LU9f<`-o;@|!pmȕ"]u0cAQ@rK_`7h籑HtbޞjG{IJb/K[4E[AP N')<%ܲ<p~o5ObW4U+.5*ZlҢLηYNŭ;+suw\JwV)ؑ%MGQ 7Yi&@hU÷^_sCwkN%ݚsuף9!zCbo:V2&ƚ [Ɯ_;Ss3vb1ŴK2>ćf\H5G mrGqޜX H^#DbMX\K`Ɠͮ5 6f"V*;l䊯VA "XK7͊)"`ܛ*;F zYUad햎ЬJl&<{MD0z ߾~IZ׵叿+vR$|x<9> |3~W_ژU|mOD& >텹_Z~_~nwoW9n Dž ^E?9Tl\b\(~"v.:KD"^(?-"dFuEBqd;&JƊE2ׂY='al< /郣VfcDCM+|7 Vc>"GoN FÛr|Y}밎0F:}VNO>/j\^j;v—!95#_I8}_%)KI/mU[;$oQ`ZniY(%:)Fl&{ fɨ$t;N=9C;ghf%r hON!>KXo;?Kbwb- n )-}jݽOmc8iXnQ:kn\R%;kn컀wT=[(LqGBy [J>1腵̵4HϔlCQX<$rչg l&v$캗K~j.J #ӳ@J&%N#` ea<[N'ܞ֪<&G9<]Uw99 /(1Q.URjYJ1T_ϣMӑBy}fH }p$kwV ْ"t֘J!N]s4W(&#L.} <౥?ZjG&=2OdwFMAZ&dOqQ݂Wu{,3I|vRHsOʳء4qƧy]P_¸غ6kX]70oh nwigvm^ԸL;޸nA,Zt|&}x]lд7 >:Vm{8#Wk6 's'!y~bӗ PE )#`m0&{& S*,q:YG&s MujTM{װKn>oE1mCob%xrݼ ݘpLk2em^%G%rbi›_Oտ]]P6?|ϏXY_/ßOA*Ϋ'Rs?ޗO̮ڗG?CGV+Nr?=VWQҘƼc~U$Zu\ uwd&%gG=f$eZn{o~X˔֊j-kgXsL@8XyF&_'/y\;@Q8t`XuAOJ NnʶH{0Lu[& ԯuJT=.dN&Atowb4+{}k|w){? 
33I0|<30ӟnNiEOY!'5%ȉdFQT( zTudls^wRpNZ9_ZByʣWWwUKf^9`àuNxZ|٢O9x*%!+l~po7WVػ/͇Kor%dgwlgTj6i{ML/MTR,۱5w_𖬶^ykܑ=vؓp5ٙ^dgzEv]gz _c,(/h'kVzŲ᧩h&KW?-ջ-#<*?_ }Wi7|c>nwwgC{ {Hkƹr>vC1d-%bp{-$}$p"I嘀{OMtquC}":KN[rbI sm{㤥Fq>Pq sP1DR7^^n  ]\LwN+\#6SD[vެϫkWk] N4Z2k笫9f\O3IEm|`z-(Kyל1YbЦQ}K:d ^z̔8S |ky^$ڑg|}½[ZX$UMqI Qyb:4HR.UU./To` [|>J3lB#8#=NלV_ =1-BQPtz`5l#ӲtskRҌ2]*Ij4ki> 'T ٴx3kY 1$R͏K '5Kbgo,Ic;-/"W:^R+W : _MJ|eH3aքG˺2W'fPH5R1iYGHTe)LjK \^n[OU5Sޫ`^ZV?oE41kA[b-k˥ZGikM "EiSUtbpLMՔu_pF.UW|Zbˊ$QRxb:9կ*FRJZag.+u#PǡEX؟s_%NE;Le5h xDz(rk-OzKiJ2WᕝVZ6j:/(/.{IܘiYd:?'*R]xS G8u$^3& !JJ51-db0|(uH€u(= q'gIij`L%TģlQgI#WcIrbqv3-d)oi  n5H!3X|aҽmӐr ɶ 4$hE I"7LI !+/5TB:igu hZ=;!ՙ@v'ӐFw6.6g ҇b*Ґ)MYiHCKt6WY6U| 1EBM ܴ֠) U߽*Č~=[,B/.l:vW(`n,Yh?30"n4#s#ҚQbDӳx#!jUNW"mpMl k,$FͧĨ4mMVr1?6k,| M}|? ugvJ`j ޤ0; ~ 2tА^@9xWMC|4$/}xrl&hH E4ʣbNB16F?Lg{p0 .I.wf@7<_ .ThriSM5Zy tgMA "@[ z6YU!WYZ{{isu_pvQVEJU:ZL+`tM7NC*GiDDC(%jy=!H˯ʲʯJXUE祤D8, c=_ܫ#^ɞAS"3ayܽ$/%o3:[9/Uᣣz Fȥ];)&_>ʗjs@@:'O:'Q цV$wPٽSa3MqLeJbk^yx'̌ d]ܑ_>vؓPƟ?ҋ}\ "0oti8w}}<˻0{Ci3SIfcJ*a<h#w^pms:P")di b|xP9Ē`⴬{eA0 GiY?PN`]ZVEQ`+J7\jp:ZJ"MBt#w;ChYEu8-JiYc;wQdA)\Qfe ck,=}5R0TV-Fk:\V3c_J_v%/yvE^/>EW1|H>𜯭Wx \92sw1mX_}&BrLG⨷dHРp(q:XUIRJb\7}9dgN?,,ո6*`h!D@ XꢱL|ZQpy<\$46hVk K!#S!-$3cZ4}yŧ 1;S)˷<Ĵ 5>I5Mhߔ$8\8#<0-ć&6z!J[&P4栄'cH52+}DSqCyXE1BKޙ>G:$"ѐlly2l9h^\\h%MLyHPDykі<1<`4 0m HL)ifE)x/Bu2!b Dc=p\@Gq{fFFal}KP895)0PH8#~ Ja.58䙳F8'ܧRÁi mH 71Kw"FiPJ 9:$Fs^E!$tpL$Ex8ER)ibH.'=& c0fZ{&@ QV紃_Ҩ^Q;OpmmLj̺SAD8v'6DC` X0ǠrPi8E;~+ļe`1*2alp@L$%p@ F8cʋMεw;avsy{tGG`"(sa"AgIߦ B1Fd>`U| Arޥ5G-Z<_.葙D+3yi;{PS*9,!''n)e\v:CT&riD' h8M_^LPɎjw[9]OZh#ɼ}1h}2rP3)f }ܘ˄#WHL%oe) z<_9%{˔p}ښMm)FǬ5ˤ O޵ƕ"%Om~l,6@&ul#9<;`{X-Y:խ]untO9"ȏIFgX*]_!q>쫓Q(M"jv<'9*-I(diU3;F'F҆q~u7 @x6́(ǁ} YT&)  H&ɯH;j:ޤ7PLA9)xY@l\R+a)YL(cm"`H}[bo -l=NZJYym= 5J^->nѪ)idUY A$E"cPn (Yjꩅ'(wLZQwޛ(X@ [ ,{z9H tᖁ>(G) GzeU et ~cn۶G͘Z4m82!JT("(dm'`K Yd,+ڱw۶J$JM+WbvLMJaBp@b9y, s3z̖>gk}eyvB@y*(`r!Z,gBC55;C`\u-E5^ w[z.*qU.MDت5neY%$R<–Š4?a|q5mF]<[e:Sˆ}a"1`'9$;w:J R]2){r^ %XБ~6FOd*qhoq#K]C 4is(2|hvS YzcrM#鳷JDRtmmiZExGHG-}NJ(",I2)R$鈳 v/Q$g$# o `.?K )QD+ԷuIEM*>ʂM8!0Z$$֤s Qamfe1=qe}~SSŢѕDh1& FdkDQR@`0Nz6ߥ+ 7 Q=AHC.z`ڢ҅U*@N͒Qu-DG栺V2$o){l45~܁[@ݶBbѧl!Bҵ{xTFcTV,!83E QB&uo= :v{NSKAI||1I`<"|bv=YNV\_HU6/s*1JL4:Hy-ۭ28^kL>Y%廾 $h^]^\^z;ǽgnOo~ja%t.RN kא'E`is.s'NcH"ᔠ 2yt3&ldXvg⼜H5hw:C#.M7)Hﶙ~t6ETCI]qκqEPI\!$Ja+PW䄆y&DybH/@:9I|Z''E TSi-T6W)#HZWt'Q[z43yq".A3V'} 04!hn^='yFZkf[GN;q}#Rzfڛt@/A;l'be;Az'sOf2^nsmuțQ+&QkPӰIԚ6Ԍ$j͑BjvA܆MYFDoB9VPkPך]66W(5GOz?0?෴F/.n8_|_Ǹyo~9jREʎ2cḩ֫1tkSDmCYέaIPkyPz5mM> j͞ujmVg 5QZ6 nf 5MԚQqʌԯ7 Y-$,A{^RrKAvπ+m'b3^/E;AiA{^Rw-f@r氹ye',kaEwV(D!oCMM÷&9CFh5G mM 6 j!Ljv5G=m9IԚ6p^nslu>jHNt;}^[_t׵‹BElEww?w~WiFqҦ9 ^yzmL; lJ5*_h<9]r@ȳȑoGLr/wlI|T N'AktݼG#? ?򊡥$Z;5C'}N6b?uX3(;r=uHQ(?c`Z-9[!xXHFQT@qhw jtlmMM`e-jS_~OdPR&WRQy):ŦD%F'kS͆Nk yԲnIօP9u1Ś&Iwh0jT pQqo-T'ȫBGγimX$TfwȰ&OIlHhBn@IԚUjnjp&cw\|+%]繟,\z/~z?M-;![t>dC_.^WK^|䧼M5{]]J{2G {W@]@޷fi5F|~kUȎQ5(Df]t;lL$oƱƌ߼|흕cɼC4CYHvDȢ^l5( d=f3?lxvikjg]j4ZV}0~k=A˟ b6?_w^ ݁{P/KǾguaډg&z/qe|uAʞmve 7m6q͠L9i=Go)@x}2LB'|YlYA31׼zxD1=zʡ9f81`/Y#q)񠞓igYiOS'ӎKk>?}ai9gHk>,=׶-Tƹ=2 78\{1r~w"}1pnzvǘ_v stN)b?elIOءdy/S]WR$cwr<"r[j**;L J]˺5GmdLaÞlI6l#Bgn*Īm8P͡k:&"()f)[(w> ] ?ZPfOrH4Jbоx%QL٣!G_"-beQmIqfëfa<@, HXBIB! 
*IP0ut*'/HmL+wu=r,N?!h@q8Sb@q2sz)SW}g9}jgf9:_0QW {FzI"{I)ŷjϊ#+zRJnEg|^c$Oc~C{b(!{|xb`s/8"ۮ)Zʼn"J]7-`mªJp_Fm m4U kD[@.km\2AkM9j볃*ڪǂ᫒ ψn˹aW_=CdD)`Kc?$̖n^*N$}"ɇ$>M%!69p."C&5!$"m2E;yvC !j7 /2d Zbujr[h&Nov Ss\XJM t\ۨ+3"6 52 Bl5C(oJ(hʠ~eZjREQB *D1)HFJ{MO `fm7P1i»0ʳNA#9kWjTaQW]HWH5#9Wp}GEJՃ@8˘V6ޓUڹ, 5!x,8P5h*AqKJh^jm/X[؇c \ú}&fK=akH1XPSg yRO'D+"Ek)'qd35U˨K;@I*7l$ozn u8RFךl/"e;ױ^ߕ!Q!f{ _b@RQd2H?߫߈E?8" P+^W >褴XbRjYfG(bXݬgv7Y*w$ZKkIʟY^߬gnW}?U**!}_6>~Q7^_ #=bP :0X#)HK؀߉ch7AZqZ7տzqC_^?usǦVٜҶ?VlYGic6ZjT*:4\ddآTX$huMB_j_1/,FB}H#-Ԋ>j!e'1>Nq̄(t7Pd!"c{ ŔC!H>̌ /(L.(hXH\D@;Sm/MoG?x=wo'}Wz:}㣿uW?>{`l=0n|Ml]v=0.D5siY/sLUP3e Г!tDZAV{1TPUBN2u*7+ޔ'E .b+4=7f.:Յ$K1r]9Wtm_EOP9L(R-TRaF΍ֈ0866C ]SL )M>5e:< Xw!ZWnQ6N l*{)Ƞ"Q$[]D2Q}edj+Hi%5, + hUwcКOR_+ldT-_>Ʊ&SI@"+/9/ f+S.x+&g.k%}v#:GxsM>(5o|ԋh5ۥ1_,*Tt)/ ~x{uZ><qԞf//CISwM/'ݕ>uܽlf/wwc>1-CrDU: QdϤo럀yP?vOOh٩۰{mBn#1pA"w=}9vE iumн6!7!L1cb,vsF`tFq]ѧHrmн6!7L1|ytv8K^ϧ=鞍$騡8D03g;>vOO_݆ n3rԥKg9fqBv=}9vE 1 _݆ n3r”XwF`tFq]ѧHztн6!7L9X{h3a+'>5t=v6t/@ȍCtSnWpnb1g:3m.SEv5t7M@ȍCtS~)yrG`>gnvӤmн6!7!L9u'?` 8+w[k{; ݝM@ȍCtSw"\r`7.]zctL' !:ppW<9Ċ>5>V,;>zuaCw:&'fbVa@gj+U6t/@ȍCtSBvK^i`-RBjXxRD)Zmi:ljY[Ms_PҒ *Lc`MmK0joSg+#K_/$VmQjnm%_tu&UkS#+i+v, [@YMR4->{Db@b`Y1k]U,A ~ÁRK1}P$Z'p.$.ITv\j .DHU#qNyL ҨLX,?yO CݜFt ;XCL$Cg" `dkFKE~ّ~'ջIY_.XZV5aV[&{,RX hݚD Ј TZ `x^?DZ< X"wh؆0YG?ml%jjVKOl-8`ِHa-k 2uhuPNhDjq񩗀1:XD؏eB-Q\ ")l(#2k0.˅X&gBHD}ŸqNAQC?ӵk!嗷Zzt!Ä,蟕ac|d\VHEĉ5(`>)@x0r_VcLSR&"ュRHHl63WQ8hڲ8v.Sp]U;!7!L]k\QўC;=}9춮J}iv6t/@ȍCtSr y6fiv}먰guݲjfaCwf7!7!Lq8|݌>2]q֙f=Oz۰ l8LfIch3auݧzuaC 8DG0͂:~nƚC=}9nE f !x'W>vg]|3yIcym.>|C|o}:D 7 0wzx?y2~(aHw-~C)(iܖ_~߿e&< \_c>f'׶7)5[_KNT^z!2dC\O~2?SWxwgܧeӻu~vE޸8dsQ*L%,9.JX~S3ZE>|م8qvEx)?G-dWF8uK8@R`az Y?~CfpY!SM!M%/h|{[t ڴJo\G ;q9 7|/@P*48-XLqTڅ)q0qGSfQzWf9¡,Yjhb=KqALHx >$T; U҉U#~[\x[vLciJqnqn (46v<=0A0Z+  G 3>Ni)TXK@#s3V<fLN!l֥?㗟dԉڌa{V)&jsǰ5WL μy:Q3sBE=0g?~93yZa !ub !4pB3fx1[P(IJg„.6f"j͛4>!h<{N(wemIteX>Jc²@R? 6hr77QmC#f6.nDK td| _l_oB19\Ӈؙ%!և~~e߼ 3[:R' gW6~ۜ">&7Ԣ:F]0%mo$}EYԤv\jvx/I>|0} bܘ32Abm_}cE|CűIaK uߣp LSVM<-O6KѯB"D׆0 h &eGtyiӠZVYJ;HS<ƾq`u1xߧۻoՅUöc&ev,0s)nYv;5D[[(U)Gץ'Zvmvi.*$OFLKW'sІ p]֥K&z|G8c1}./> RHϔwe^O) v|4 ,wsSz4דFwĿcRAӱ%I ޅl:8?yNLJT:srT4__f0ܪQيKh<+JutϽc]N&mnMzs<ӢnRVAXI1<\;NF޲Ӽ,)BbdZIU/E1l2Lfar3% t^gG_@|58+__e7ŜVKϾb`'hK~;(<{ Eʄp_Y2Y RX%%Tbh}{| qm<ƸG!kvݚ'hv_n?#GP ٶ;%+!T)ɑմtDw;ȞQ筣~J#>GR_;8@R%: ]~ZV3w)UG"RhJ/_)Ô -Ŧ6%4wQjb`Lc1<Diw;}9x{+>GWTFקTb ӈ#l>|vO͈Bfjz.j Zs UEH[b67^G;Ivs3 ?+ fk.Âh`KFUrٯC2\%r PU =]3vUqw25lx@wVx2 F'N*Tң;qzQ @ary)w۬@kԾ3bڷ*_k^&@3^рR|߶ҲJJ"魊•U~A'd+ǧxTRZf}l\vSG_@n|g=p1,PZovi[DIDEL{3?*(txNu%.Ңw BX { 鰂_ɶT~=0ZMO.}w^P_n5 j1;|_G,#G\x1BK9< fdw VwXN?^!p.:9o[ț,Lw n=|rTN!~4Zæ. Q$zˊ_l"EK ;sMV=G'W?&Ոە|εo.xL+Tڕ(#cхy*{{u9)wi]w!JFЃ30}ڙ[!m`v,`6')CCEΖ)ߖF{Lήl;Ki+@x jqbOYJfC𝁆<(cOҔ➕p'qW?rp[5Ei 㧪It/"#׏(.e_SܸCCpn u;E@4G`/i ec)(J4NUBLu}qjVKWtUV(}/ӻ*l5aǺrX})Z7Za-$:&GA[[4e^{+~su1"u >'@MKXqI>t88'.5 KX;%bQ_:~cOe#fzk\82~Jd1R!L?fEJ'gE0 Ygg87Jju)٧su6s'ҡ2R h|jrcߎ& /lt|xEW| Eo l< cx E#yH'L:7A>c&FlqRƖ)L dz}YP\Mgo5H} {cB${^3ʻOג~~LLu(|.ϬFJ+dN(St/SiS3esqcYõ KØRPS O=]N3)b(V SY@)xw 쬌qupq}gnZxXg(܅٘C#/qw754SkV۴O%]%JwLd8塧!$im!hu& +L%KǞDJ +]>џ狝Qqj4}y. 3XZm˵jvwfO߆3}?b \^k30:V8I"Blۏt?Y^eWR6;`&SEẕIf҃1g{ÊQ :BC6@Ѹ1pxӨ`ѠE^N0XBFC\db;kU ΠabpNGI4aPcxD(IdQČG$*pEAC`S!Me *j-z]^(QC?b+3;;U=D Shp%O05C d,FKnFwV( H/1dDgVRG@,fmc sHK n PbtOrazBB[ A>Y#i=JPn=!厶5.i{I[.A F MRԺm! 
bĵb\9iD9(Y)YJOP#~jDr胝r{N9CU[V@nR#dMXZ16_~j9#F~7+ mg^.[&CINp(O 3̧>{X/w+]\*smMYrbdo]PkGp^q/ z#Hm웹B[pn{2[6'GZęE'Sn[mж ZҸ[j< Cg3F㥌6֤6qhoi'ݍž}&)t݈tLbH *@?o4^zo:LxM96S?֍ [e.dElogpfOH6 Dn6-kSѻBn<$)TOP\O?eX^Jʑ.Ӡpk|jΚIm?{Wƭ / i싪ؚTrIS^-(xR47lJzq9Zp5CnOx p?AcƆfO]Z%;4.p3=}[ q_OG ygO+=i*㼛yNT5i^{T 5ѭ4PPqvi->+ ﭮ-.6ZfoUM m`UHG:Z]Zf "dL{96*c#gvب 5EI֞"'PC;wrB4P.K}G8TWp:٤(wkShw_Ъ.I2& `d( v:gQmXj^ɰ5m5ϕ3vv*]xG!G ok!Fh)`U- \,){xbWM\UyR5ë*$dm[{["['jgf22Ж[-Vbgj1a `ևpq?]|㩹Gf178.xxvn-]TVmEgj|Sm:-eeUF= ['+Sί  k%N:]Af)հ)?$d//'+UV-Fy7QXU ;E >oV?DM?~Kq|'uS=!8 lJN׸5RЇsRL)vO:Kh4E1GbG9e {XĪ}1jp +ރBn !C/Ih5cl_sF:&R6ˌUP]:Fs@55㮤y ,X8jyJEWQ[}c iͩdߥmdN .J$YWvO""]j٩4,Fc.f:2Cӝ3Ը5q 7~ϺX<}^50ý%îWDZ3['L']eіLgyᆳ446}^ `7i{C g]`!ZC H 2KȥSl1ߕc0 v9lJ ~]xlo,vD̦,X LzdVdB/( ܏]bXyЗ3;FKq#- .?gVeg(l0}.l9oVBKçBB910UE~V=pY}*8x^9>R!PE}?SXC}7ak=XHs̫Cl;n8%sѶX&@y7GwbOzYYfHE2[>Kނue]Y6k K֜DzD ƃ,z9G~ P`3A3F'DhMxApvWsy[/Ll~"sYvrn3c۴?~:vb^eꄞ!K8A粉7giѳPog7Y}uݽY</Y% A#rӶ7B=C'B5oF)&qUvI1pᔓ|T" >.D5z 0HB\~Pj r#< "m)Oc.G$H(8Po(I|f:X~W5f90ژǫ(oXie$@}$e}?F/H}ējgMi/$.bD(e҅wd-t4Ut"g֖l [>!J+._٬*OF.Ɉ1KTYoadٸbDHҴkk1df}.:7bE_ߗ=Ϣιwϖ~B^@aY[_^an1sc$D_f扪aD"h0=!8"}H-" k*.upbEtx"" ΢EXR q-Pc ˅WdΥWqdq$E4I$uh }(]q^Qe[<Ӊjir'%2R B_)q&K%~-MC3X>+[ (8WQMHH;P״VF 2iS>XCa-ȏOT_Nd5J>1jcN)fMģȡq٭u##{GW@ꥁ]Pg9iiIUbFz0$l?kʄuoZ{ӻU_zZJ mc߼Ea‰4} ,'8$Ia44ET@24B^\=3-kD.a+ڿf`yX:9\W_Vked`jG`\ *H*uƣHpId0O2qy`4.+"2ögOe%A-j WQ lQ<=؍Rd];X(?vnc${~q=)AzjO_S%Pgj?dukz^ #f) Κr$`ݾntS^BgWmXz$M;!| 3R9[HDgc;bDڝ?@UFVhPEFwƘ+/KȠߵVk}3J\Ht|kSҸzmVgV bJyS tgwr&cF1nղZ`y JhǡI:Q=܌r5USch'A@.K}s)1q&u+6 3 VwB}7$pk"Qq9,{ffAt~pV/2Xj2i'Wt@JYBxl_G:lf#Ddvc` *: xƣ aXa Q8̙yB?XyiY%R}j%wN@m.v9W _<\R[J1F tjg5cFQ0'Q6xq^%ҭ pibcX%L H$Rq"aL}!i SHJaDR_e 5p@;9:5魰;71 C4G0i jA`nz4WןnjC_'|ݤW}J.$yWۘ}e(Z+~7v_fYSN lq'QUG3µYƵE2 ',r|-A_G#S- M`!3"->|_f3>%Cxw UZ*|kݑ|MP~)E"!p㲴xa՘y]!нP=w(9?a#}Io&fñCOFV5+hߦWsԒp]SRKӭ{iDgT S4E)vM\-{^3eAST-N_feɖK"۱JVKM{'߉9NBEE٢TK] g:i\9U }dI%’|{sX;)~-cN[P}rH0[nz}0[b…;6ĘN0CRdXfRQ!?%fE qaA2ʵY1!TJ$봀QxNuqSvO70.09-+sW835_RU"do-=N'a b:?ty^JM>I1C+,&dMSEH"DD J'KkbIuܟK9m~%kxOva+ !΄aEWNw'y@r&_E;mCB&WyL)bI}(åQ\+yFYzNX4A"AN1 {'|< 1JeL!Ȉ#Up 8똤V 1\ΠB`mVg*2SpRԽ]6<緀gHlߧ9ޡZ# "&)(O?m!ߌ'YxdM7ITe7>14Ҙvt:GIql,Vy%a~KyMw_aqvnl\-F 0 D_1GPt#d3;k0B'ٙ3'AljU/ U$NKo/9f-o,yK(pCh$:C U90ʲ,(VMW# V)`"@19Ba*&P)'T}H?oslewZM*$l IJb9s̷),VwM°Ajߚcg9w]1 4$FwoMg#h1̿ %7 E{iΚTu}M7TpT5R5^f dG0A"{= H?`^9N\0r[bHNZS@4tM@YɣC_Q,lDB7'+?k0ԈP5c|):7❷`'FxnJamD)@"pu^nhc2Sbmf@5q!*(\!SH@G SJS}NQbW=4=d_-iC"e)*Xg^'@WP*D3Xz% M(B3^oŔWw脽i]Er~b͛Ě75okTYꂭR^#՞#P z&†K$ ;w,cXGݵܼAl"\sA< "ZQNQ縐Q2FIF}$ao#*oUpr15Y<8,p*a9u*NU4::#g`<6}r;Lt0OO OS͚|h":.~qfW;|qh)牗tSi#c$ag2l|5f(6H ƞO_0Cװ,+k.9m_w8Viax"dn$jJ8&8L#@=3 4Pt%Vq`+4*u(IAY EpS^c,N yD:,sKd'~a~wN}FP;g/>kL!B)T T dCӳ1P ϑkĎY'H<4( gT^ 8% 4X|#A]ĩFS0d P8GdT‰#"3D%( # VVEz]pSuXGJ3RQk<+-Zx4hL쫴Xky|&a))Gㆴ[ Ot = Ub #P舖rH^*j4( ;} F?0< "F9 EYw{[(zmǛ9 Jִv3Z,+An@WOC[}'`i-ɷx[T)f{Ot:;fe {OQΊ!<wSE 0Jb~Mevu,҃?tv0hAZ;l_1%dus_؊B=pbV~Zuv 2dիGo:m䩨|l͓$VآMRZSg0jBeSRxA<٪7ݰ8qqVgnst^u)()&C>_f\ŀ iWvܓb2L+U}Lrkm.tL,͒.'^X_HtK$gtz\>:-?7[┱͍YJA(a\|VN$>Z G-&PJ@=1R8dı~ ahYtX\i `\ ABT A| մl\o*A431e l m#d@M;\(>87.@U lBA X]<&jln`Dxo;Ԥ$jPAM!4L^+jXSN:/%jt.5) Wj&jBDMAV]K0Q+]%jzZnPg[?P3XFnk ,4W5z5@z-qz5 b\sjn'&DG94p$hcMGJq: duv99z; By@l/G@sA[Vmw l!. 
cfc Ȼnlٽ4KcHX;GYms>N+1J2MЌ e2HbYpZ@PਔI8 3|JySt6שM6jdo$o\,.Oߗtayd?}䛕d͚3öb/MxyM( qߗ3>^YЮ!wyy,h^O̕~/Zޝ<K7^$9%C*nΖF 3AD>Thh9P_s؂zʪmǽ_.c_+T\S=w.VUEW 6c܆<ͱEyekLF?fcW>ܭ>NgkV|ѦMn-U j=+ڵZ@-*d]S:L-v-zn藨ԒD ;S-Nq2.k鷵<ѴwBKT Q*^ mF}Q-9f2ɠZs ٛq/;EPisNc}wwB}(mXy6'_JP%ף+;35Y׾c5S`ָRkBB"Z%SðKM@[7EajiP":bfL*S[jDK[ hLn=C;CqPWQ{)ڜ`opob}w8n"z|3/>tDkpPt.XCJb+ǽ>>~qw )Guǵ|_SJy :]_얂/v>SM&ܻ#c*కүZg f8߽e%w>_NK^߅** AjJr ;ôTj^p-cI]םf}Ey'bECǵf5+RRAe5Rz`%voL m/dOѮ`@5MCL-N`[ L$%**]Jv)i\-PQ*Z$:I?_7{ޱ`+Α 3 \ّtk@ys\۫`495 0.hŀOJHBʼn֦_М4ԯ@o5/QʤI quM~]Bk^g{/։';t-p VYid[h|A!{lc4)0pOA/B)U'h6ic~4옊 #4ʭfVr_:Qv0J'w| \2I +#ŖHi#tM8=tõ#jխ-|pX\4A9Xu>gyǒI{]&{'jzJ^Yx]+x%/Qn0;j00dHbX%fLgOQ.[gM%|s!^^ܛxPf<)k#8fx3ꃳt76 㨙~<|\8x\ n Fs2 G"0hTPeo8eO9 ޠmNmGgMEjӂUQũ"bsŜs5L ,&EP[UM2P GS.D0ސA8А|$U H•. chV4lg(I^ ad+NdFi5&`Ɉ*$ cÓ=!&!ptyĐbEIEMc@s:.`üiNvJ zUe^kr` ڃ] gBGVZDZaeV`C/5O߯yPę`% Gl=N҄NysX,s6/Z+$ޖ4w$) F( :QOC0 6V@s1ѪdB=5~xh\*bǴe'bBݡai>64YE#/&OW݀H΢qF|<I?oQץ)txj@i?cP{32h܌jQAQ(4;:Hrs jwד{TDgĨE54ISUAB"6 ounO jֈ4R}`49 rz`{4Ԇ!Zz5j#V&C8$h:V@#}KPCDLm >{t8ւT(E '2k1c9̚!f:k$Xa/~=!wRo0z(dK,\1 bAG%Ę- `VU .*n]=-"hg@ȑW !A?"I:R׎2>&Ap@+F:=!\YV}(Ԑ@|9V5m+JQ\N.ޜZMހa[$ O+nGjTxgpO7{с(Awzysus~\?]. rEx%?׿xJ\W]Co}ϩZJ9]}gwmc>:{;@0|}!E"y R cDg˳ܶPH#,zk+b:0/O̓y +ąP- O ~w;ccW 9rV8G#v=P^9(-ҧo/GwETɅV? h-=]^IP5>,S̾ E"Ka 4&m.I^${ޱd)׽zґ%T%  #m*ZL6T|Ej bt#3x ^O.xOEz#'gOqhWĞ%!%/=Wݛ=ug/v\zp).n)јL^F&r7O*99 Gg^}p eq~ 3;r5n=j.r-Y( Kx7+-aTڲ!ܚ%94h3NJL.]'!nͱpy޷>l7uTrP`k҄hRQٚ&&֠&E>Z4)VcQ3)qhNV`&%,uLd2*h/e f&t[nB.bm->h"Wݖ7)D<8DO T&$͌&Ej'\ƶA~u -G 2)D `D$EXf#fN !]^KJ* 9JU_}4ԆAPWd{wKljVb_+Mpr >g+'dHIڒZbV;/]bX_cG/>.߿ߩQ/U%SySLY&!UYe~:Lx^%'S%VQ˽ɜP ȄRr)_9JD{|7ʉd8gSO=& 0z6bf7 !IJ;M]=n\wӿ^/RQB9(5Cc "Y/.{~5j|%3AD!R2X4Tbm\".\嵋k֠%`+NpR`O,eӗ0fZ2ND>рDCm@:M@b= b+u2SG}h+718IS?8eh6Q r*&7!#Tk,޹'Je)0#OiGaI?*ƃbt^\"Zyc^$ǜ7tMr|<Aebp+F4&1C0s73!!j3w:|j0=Py6!` 0!#缡n(ޚ>85 'J7s>s6*қ Z5uSg"$X99Wĥ1HD)iջVd&qq 1QVyNYo(&(Y; BK(Q-%PJҫc+%~]eXTĖ:|{'gW >lrw:s sΒt߭?cb1dD"AXzLЛM:=N%RhI 㞗@k>sLj񍧓~;Zw+􂜊ᚱB;5Ak ypUW>*z@{CUk2 E(1N kg%jHkq n|ztOFULTw RTLqLnRpٸ3xNBqԔZ o՗w^*! ɤ5})c F;8?_t7& 9 g*я}UzS88gq* #z٧/l/{K]yYzK2jyٽ$*XMNnYy?+[? >z[ύ.mƖ Ô$rH;~ft?>d؞:;Wv|iRϏ3 EQ0(TUz` 8= =ghm{J  %Jۼr3:lSo>޻{ ?J|޷;2QD.˪=l+=qNv1)pR@+J͖Ys(U.`is<Q}6·HǔvBg|?-qw7;ܥf/S<6i?4O"XpZ2.%CK%{+]{Bl=-Vv,OH3TS)@Fl;!rJg(1GhsKN"mSJzH.{ XIf(0FDršGm)Vr4=]%D2@$*"_x[';|}n[vE>>,U˟Y#YϿ?wW=ǢJU-7O7w|wf߾YZ.~PMɛrw7<m}\Du W}v2}իX_?ޘ]zC_|[믽;=hjChGaM.醁DV <}cp_T2>PZ2PH j<&9ўT4<\ܺ gU,?ↈ_B{z`n%|` Q\X1Ɛ#n~@:wE pfdn?R%JVcZMR!.Q}V\#>qsD1$`8ڻJhVUcH`I})Qq|=ጵ3g Q!xìv4Juq斯p4wK1U{ 248JIbi!vZ$35q5 L1L c~uNWE)AvËhUJ:ow}?;dV fο^l6mû! Z7nt}B.ԦT]I"%tlq8J6~~s@;g;]"* QtM3ę>៟UG?~qn]M65^ ە{]]Tp<| ;~1C;%Fm}K} 09o< zEC! #|Զ4{2ȷ'/]c [K&]_U`;?<;W[w5X\qOcyHƔ!'82 8 _~n9Vy<͢Č1Y&: y)CNrxX@„h wX?+YgyXQim?YE4dGN]zϾ>0hrb+7=7OJ>ΟeG7OՒn8$5*u%WJUˆ_e}{A/-t1Ю~%m5x:KOk%1^*p$t QӚ1">p=F k0W\cL\byP1h^^6r!u#D)aH 78a;OD]c,Bx\yB\!2(ãPk$BԚ癨1NStmX"=E Ud@q堐P\sظQAa*0G PǧcqΣS{>*,V=aƚ~=|dTkGJS o&`IɆem&'MjǔC#Ĺ5bO|0#"Ʌ1>Q[L%ŒC|%]e,MWiWqVO貋ô!ta!ݿHb=nxO0W>qx)v^k8C!*MB<1Kt* P LA2P~gIHgI҃xخ`F2 Td$9r!mPkCm8p.ꛟkH4B48(QY:o*Fo#o2boI#jhg%:O޶nS6N7XRh֭\VCC>s=]IMjlE,nԙ ~jƳJr2:Q'#&Ad#J~ M4G+o<缣 "ȁ1*c Y@?{᥈2)R BƮWzx^bLLl-1>?'btt/dJ!QP bճ`#7t5r< UI^J!sP7cxٜvSv__?(kpXG)5&t,x1Is2j|;N~h N)c!,NBzw8RtXݙ;j6,tf'^?oY޴!e%cYY>_:LI@u#xWL%tau?~>:v&\)@ uxCP?&9G#gGzq6YЪ4t D/dls@ ;5#˰QK$2! fAhm7モӬ; VM@4ɼ%yhc<m{Bj)$8DI&cWJ=ĔWLp+޷4 )hBDd *q$79$uNt,NVC|!`/ }CPU4|0Ãު^mZt]}4rխoo氱 ~:t˿mW@냱woGXP]Cb%h]Ɍ'놫OUVL|=jJEFR:fPE]Rސ֋Wj >TA6yZS9b0L姣'jHw.E2%'\Zp.h1cBcVZR^>pK֨ 4`81|eXFU$ojZG$zGAGf ^aCɪ(O|5Yjds^S$҃ꥪҗb?S,Q, ~#Qy*vyb EH4Y=n7/J1Hoh :QZAcnVLhv!!߹N)вB#}'/_#8_}Yz+er復2񚓈8S봊DJA?Y UW&>SħPx*umΧ">e3v#*$>)Sf@ saQ]-bLqB7:b/HcR^\5ჾ`lQAJ7o4f ?ʏT ?jHw.E2% ǡvS_. 
1Hodi9SiRB[ EHDV|+X5WoY-_(ܚ2GUMkfGE(-/t_eRҜPP.xir 0BTy'.GwS *o,EVQ.ZXLla Ul\@9 #)#( : d#gw /l8IIqvui.i}uj5N7rkV֙'ZDmVUB!x8||:ҾozB+Wj= L ]A"p{>"3Y_8+ӏ?(ehgh`>8$ 3},:&M\T, [I-3~Y("[E QE@rV_ xyO;DqS&wA=V*_aͼ1Q!D mm4̕`:`8 lhhʤֵd9NPih}hCFy7aS )h+:gЗ޾I׉)nOz_=mFkҳ;ALv wDZn@$ Nu ˨@dp(Νm'8M7Kif[Gg]ԄDoWrD.:K = zŁ[#%_/Yj4(jy$-TVfqc9"o"܏IК(oDdV:c*P I'4!n6+o@Q!mxZaNf\ V XjP!XSH۷&0oV4m?bbr>|[xX;uMaxET6iSħPN<G^9v,i5LXϟ/adrEoӨ{/g! .Y"֝W}ClVsb*$5VI΀bM۞\ݾzWsM^*Zkoڹk8U~Rvk[B =Ι03H"k8.j:wgi`2FEYJ2w sPE n[X#"[p``~̙]?3MJbwo'g#~yREs K:IB 7` S۞)I`NLaF C<1N#%P!OLo:DriJKĤʒ:xWm'1;"bj@DS{Eor4W$5Eybeh=[J+?ųum1MDQdm<^Pl:k@U[:P)Pbp Ja6 ']}b2 bӍ|9ɝ00ɝ40ɝFatӛlF?eS.epr2je9ax9ahܧӤ- 3 HIPkw@2*\nϥ s KYjgɖ8_vvgʺ٘S⊗yY&Ai4 yU^ف [v'b\?}[_Wו<\5` 59J4kI7hbZ|6g'8%_^p!r}Rh;901@ I@ ? ZA'V? N:TwZS!QP T($g.('3@J%JD8/jd9p3PZJ̎:4$_Huܠ˦%0MColO Nޑ$2X wNܘoA8O&spI} Ԫu>ρx93yF2_aG |>gh _?90Ew0+ڟk7rI:}OW|/v.iߧ}U_JG,Pw@; ظJaoQ?OVB.O^}ԭ`NU͙Qzt:+ѿmن]}ׂ֔mza6RZ6^Aj8gтRΝ)3dmu]lre! A ]SYS6 ; 3͉"9e Hϛߒ'6 T=Pk@]0q) [q6(>`<ر޸!ZC$.zAS(ì)`4gŹÞr.pCڃco5B2!$4m{p*ƁF$Aq'|.F,N@b 5gBȢ`> L).x_JP0t (@$AqD HHJz< ~D[пiQ!IFc8 A*%:WuJn$0 RE$[1QK{mC`#o8$X1!qS6\FK|Kmfr Tu+`H`k=G/n-Zb^EB%>'=W풐t3 6qE.P0 P=O%艡{l=?x R$*i&ƥ_O~$ R#7w?Tfֽ`ֽ[vc׊r8Qkv"wx B HcD\Qĉ8m旂oxVu`u@#FZm'jlN1iRFg0yQ0͝ISyNrtN3qai#ɱ$Lj#I"zj,6Y1Q7k<#J#-4՗wJ$S?`LЄSI7nєM[ j:HWS[ ~:ȗa/V7d `_Md^Eqj+&bH:VKyµ<<HaSDPI- ҷtfy|f\z,Oiņ՞&\ -YL\BL,zb $fS$5GӿcMJ} 4 ϶ "=kLPR` Ǿ}R-.WV/\(oX-4׀ *颌m6cT Wc<CO/Z;MC$OR]]=h؛]^/,&!xWкJ0y68Vky}"eFA.3}TFWGwM Vof6/.b8Vŭd_;qy=סV|*U*͕ޭx-V.7hLVѭW}P ݃0 RF:0 XCC Ȼ(X¶]G> hhBgTgrZFC!LM ׻BpaF4>2o{Tɷ[Ǘe?=Fciu"o ȾBߚGEϗ䔴')[GtJ*K=͸T77.Dx C $#_ra7WyENޅ| [;JgTٚ}~j}<wG>ZX 3&ˆcAp}X!9Ce6,U?6/'K-SPy)sGydYù nh0Hnfo&Jh%Fr#ӫb[^QϘ RIY'$lJ~.aSvjȰ?WӉJ}cS}[?=9371P G-"x*©K:QƑ}U:)Ya1/D8eFm c6ի냀Ųy8S揿7wS_cPjMvW'7G٫p;O ,D/qy]u%$B4zXgtI豥`C{0{2}?.cm~~zV.o/q_ ,i%0&=OF҆@\8bs3em ֓ !0H*^Nbb6!YF'T>^664+ Og\"V%~, ,ê1ԏ:BZU5кx({0_lG)K %.Фp&$>'EL@?|LJ*fRˌɀV-lL49~`Pi 2sF5d%3&FaKؠ,I$PR4qF $耺1nBW ^OMΟ`A*TzJܕ}km> 0!O !ʼn4F[BᣳܨAR͉iQxa5&R] O7&Z0R,j4- XMJ%!OrdIC8NVTnT +nÓ:a0 =hDt!%.iqJFܘ9)A (T!̀X,ӞG^BJpxl@$,U!9p![~+UtKEvad}ka`8?,yL_^fŸ@mkk=_Z]p<\6|Y8zf2?k5ZC2DtRO?ng1yVTW/!` mھҒ|agg6}GRAo#Eg'. }0x33 `i\?^_\ZM!#ZdF)׼]Pۥ}ݸD)(}V`tFbgOL DS4ըf41!$z"c #.jÙb RZ#g25T*ѣJT=l$+fh:iA{D3+h{hzq0Lj,'#JA U$5NHe00vUZavԊã;2Xk=+e &:T66C /k KnE6r:-.9[);'œ r=y>Fuv8DXYzwOMM%uj@MSHDFFl T3o!d H GEF=<%h5{En1qHY ҏx E[u.#e5!wSPx@0j15"6"F2@Bom6MjP G퐂M./kd:Or )s5@X8Hȇ1$Ȟ5-,nCw۲@9GpRE _.p܋-Mh3+QFKfWgZm5 ez0ف;K-E`K+dmЮYEcq]b]p2 mPVݦ Loh*]@XnFQ2Mgfl*.\2? h@ޗSF7&DrS IQz'RIs&Eࢱs +.Ҫe@ $%Ǯ'!hFNW^QQ@IV %ȭ:-hPm?"NȈțptݨ z#+Afi-JT3-11@;l#Y C~ iy{ =j.ڣtN Dz0بxcJ:GqV< SZk}e%A%lhCDܝ>RF:+3{]\-W2r-GaH* ulmtB~NuIurNl}gשF 3rzSgiBvJy欄+VߺG+}xV8JX釩~vjTJgFFƏ~gשHVԚyVj *r+[toq;Y>r:Da6] 5h3px:;x_.P`ѡSjaP%OxTg&#ҭ!WQu>[.ohfAƺbH}'?CBh>)2no:Ie԰=p5Mh:Lg?^͹Myx߾g2lcpI,\+m((ZB,g5 R]A5-]9QEUD>$2BYkJukALcNJ ɸF;ŭ:{!QLGR-[Wʤ8䩦g-%9vuKNI[yՎ1?<+a@nIu[zm<l8l||\<)HH D~W8V=2ZD'1MNcƠJnhtv\$et1o[_LҫPS0}z)cHdr懭݌ y͡/! 
Tk S^:!z6OX˗ ݎ+ 󣺞+ۋs=>ms9yyj°u a{k|:\ǟ^xŭkEuGF ;1 G7/}z%fk G#V{ݭf%]bfL8v%38$>yw<睗Q..a\f M>H 9 v&{k9wVJ'Romzv7N Oތmd]WsV3>8=尳toH' YJrac(]h1 ]{))LXNDй]6;;qJT?NyIRD" O߿8.S56a4eOmXaw=IkGN/Ml|PspB$2d륓I*X߹Wv$rsb7XhF뎓Rg?(%&|.n+9Z?YO% ?Zq*{@&OypJ-6׌3khvXuA V)31O >QJ؈y&Ēꬔfy7N:4ӠZ1'I'7q<-VnB5Iب1j 1wsKjgwbu\k]N5[>5رb 9ŖY @NI&()d\j" w1F=uPbJBs`ZJQzV3KT1kaZ-bff6L*,?Mm-ڵ~z_h^6XVϠH pT}122Rbf)n EWof'z1OnԠA= rT[/Vd,q/p$,^ژjGVBGLSDŽ(1)xʰ(H8O5Ê,آXŗ6&wͳJJcXSʰ@W}a[-@:ik L)$BE*ɾÃ:"Y:w|hk҃<}#208AaѾ8f.gFG< БKDR}}Ҹg:4hpjql>*=Fxq )St\1\(OҫݎyA1(#ej$TavP֐3+/2:%!5y9"9M.J xdF1jbX520SD1U%՘#)162R$ya>05Յ˜^J#]K4i65Qt z=q,5&C H +"!Zx̲ W5Qg~n41 NZVuN37VԷJ?DTG+}xVY+}!FGr7PWO>ٻ8}y^hCŗu僛63_yqwߞR/tx D/b@_])՞M38jCmt|G6ORjǧ߽кc韄{WЦq 0.. 75K+5OM;uo~LQYEj|g5`&ܭac'm}7Zxs$V?F_igjɎn;Xg!Jca&a]hF΃?]ٟ~5 ٤n>Ի`K=5:>-Rjk98j%r(rj&'{O~IkDntBcx A_!eu/O|1OtxgwÓHxy0oy7WVIiLУdM)9(.D*MZTdmH Cos z_ll;PവD5Imo̿oHIEP!n[" ɐy<‘.Ty7O!֛%_bD${|ɷlgB\ݭ!&{3q [|.S#j"-Zu`+wxhrhC(rĒJѓ\etpPHERER߬x[WK)$RMS/b-:ϵ'څQ\oMF}$1g4hdP]b1&R@EFtcGQFȢ鿜#ouq^?B Ճp3 +T~}+6T\K߲on?|b0z~)W'0UZ53'Q&ق!nF#aݓw(2q_!.i2 e#;ݞ%yy2e8jy 34*q2/h>+#y;u j g;~,肼kۂ򓭅Q C2)=+-2Kz(] N6/hp:t|>ibg %3>[eʎ#_`YMDT\ɠ+tm@lpnN{[56/=7W>|7 VP'op?!ow<CܸY :`NMg_;aÒ+=q"i hdҩ˕=O.iglWlkbv#0b{Ha D4˭UOQi&~`] ^ف[MzknM6rn{z0U2ȑx#/W;W%{6b q^/yDjp577N~F3q?t')LPr ̘Ce2UHEmKKPew}@;m[8t|K-^Dvq9t@lo!|}KzyppUz>뵶Pk*yf6ov3rrT-ũyɵ?'^AƳfФe3q8HNV3;~\s3BgaGh Mgdz9 Or;C^s Zaev}sĝy' ELmwnFǠݺҠDtv;;F,NGf鹹'shq ZE5;_ūKpx euCE9߶ Y(&rPd;ɸFRNw8 PK,-Ձ&u #sԺ6w|ABQD!!љQ@- Z 5TmKR#ZZflE2 TNQR徤hI+x["\،h GK!7N3c;tZL7Htz(Nϟ>qUW+"=Lw Wv Pz-?/5?Lbcz z:@+>wSF\8o7̖m^EeSpMN΄R@S&PJ`u0O4)f1^L!GKt6N~4s*E3U](`*D0 y\6 #w-X+xMr5R}57!̮oZ Z24ʜ.P܎ؔ|?*/+w/TjVy~ʵlXT]\Z<Ze7v^5k`h #t.I=Ӯ0%Y^˽`gCI-[wERTggTAeеjhl]#~0Pjo)3B%k\h;atF˕\G]LR1ڣZ!7t^-uׂz-/zbvc=CZ :W>ٛlŧ&̻(vN ƛIwW'3fS y v4u wЀÕn9Q/j*7_^u 5;Ϲ^GY6osL-0d5 `ӛlEΜ5˼(ZuumGyMT*xH}R{yp[$P EЊ&L*Z!/ǥ]TFJ}s~i J̎?3&{ǦFw=1/C|~/" ^gy;^ laCl*179{+^ suڌ 'xsV(aIcۡUN,aUTQXÒƠC:ډ:íFxI.ߓ;j, ѝCzQ8F!X5iG NGt0We`\ Dm P^U8 $'ѬЭ+y|$ tAsދXϟQZ"l'"EmܷQD}͛ʹbLִ1Qis,ɽ@Į'} bB h˸;ɶ ,A4I¬4(2IBjh $hnAk-)9zW  ?P(r6zEĤDO;N0̈́Bb. N &AʳCI*C2Vpސ,,,H#zFh /uC@I6 P p,d$My討D)Π x ^^~Yi mx! m" OkljܷZ،w=N$!2W%;kiQ5L =qQkI=7⏆hh!׸㥾Jgic˕LJ($9f-IcuّNMLKxLFxQS!d]`O$P-#m:,>Յ-zdn('V䜡N%,ۼ;.ϼϖ:ۋ|sq=}iQA#o-z&"#W*=kaw1$́Fܙ@`G=^Ėa:ٝx^6(DAQ芝hXm%ƖfjF𞻷uo+yºmB݈{\ۊDs;k 697JR%>bVqe\c6R 9!hp:4|nՃE-ZnmUwxÖgɎCP~x<5S.= kXѬuZQ@|u  *k+bv}`7;74=i^1͂i1ON.+o~EaR ::g׌3 ?G?ͪ0Gs|BkNLgw>[k1@kзxr0۸?TСPԧJ0ITFjZ$NTՆwn= _svV7إ&ʳ!vS¡ { p=$\iFw԰;/SKoXyXhqs﫡p4nQ9BBarh cDpEYd69Q<)";m=TC0L8Bd1G=KD?)㊄h$O.Rϼ@{AIh4pMQā]KjkQnD"H=\Z^IĨHX F% NzSJ8+5HIN3Q /LK|OIwdl p\ReqAiK|~Ko󞵍4YFZHcJ_ JI e,<㚇*@[FᆂNM 9#h? AOnRJwN9eRWs) +erӊ%KO?vєʆ4!TMpj a5?*W&0Wy Vg}&_b;$uZ>?#|ݐ &п3;je:aՕ,?se :@*Uhg@ţֈ5ѸmQF_#B]qTi2>´݇"GDERzXHJ;JS1vwF`QrHFy?ι22HͫH"2rJ "|%4hB~"'-5,VY #1TNQ$ |FB~D$e*y& JbE~:+뭪$֠ƐeJ5jpl] "-kQ %Nw(ٻ6n$WX[ /Urjn/) ,[p(iHáĸ*6O@M8)ɴu;PxM* Kd ƭ-YV 9 q KQrA ,GVfY;PA W=0V95Kb\DXD%bJ\ģDFk,s6B$BLWY9 6C!C3 :v`'be6Qu_9󹒨TQ. 
var/home/core/zuul-output/logs/kubelet.log
Jan 29 12:06:24 crc systemd[1]: Starting Kubernetes Kubelet...
Jan 29 12:06:24 crc restorecon[4697]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 12:06:24 crc restorecon[4697]:
/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 29 12:06:24 crc restorecon[4697]: 
/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c0,c15 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:24 crc 
restorecon[4697]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c2,c18 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 29 12:06:24 crc restorecon[4697]: 
/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 29 12:06:24 crc restorecon[4697]: 
/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 29 12:06:24 crc restorecon[4697]: 
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 29 12:06:24 crc restorecon[4697]: 
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c9,c14 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c9,c22 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized 
by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:24 crc restorecon[4697]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 29 
12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c0,c25 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 12:06:24 crc restorecon[4697]: 
/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 12:06:24 crc restorecon[4697]: 
/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981 Jan 29 12:06:24 crc restorecon[4697]: 
/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 29 12:06:24 crc restorecon[4697]: 
/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c336,c787 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c336,c787 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 29 12:06:24 crc restorecon[4697]: 
/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to 
[~260 further restorecon entries omitted: every remaining file under /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ — the extracted CA-bundle certificates (ACCVRAIZ1, AC_RAIZ_FNMT-RCM, Actalis, AffirmTrust, Amazon Root CA 1-4, Atos, Baltimore CyberTrust, BJCA, Buypass, CA Disig, Certainly, Certigna, Certum, COMODO, CommScope, D-TRUST, DigiCert, Entrust, FIRMAPROFESIONAL, GDCA, GLOBALTRUST, GlobalSign, Go Daddy, GTS, HARICA, Hellenic Academic and Research Institutions, ISRG Root X1, Sectigo, and the cluster-generated ingress-operator_1740288202.pem) together with their OpenSSL *.0 hash links — was likewise logged at Jan 29 12:06:24 by crc restorecon[4697] as "not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16"]
Jan 29 12:06:24 crc restorecon[4697]:
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 12:06:24 crc restorecon[4697]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 12:06:24 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]:
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 
29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 
29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 29 12:06:25 crc restorecon[4697]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 29 12:06:25 crc restorecon[4697]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0
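The restorecon run above relabels /var/lib/kubelet after boot; container_file_t is an SELinux customizable type, so paths whose on-disk context already carries the per-pod MCS category pairs (c7,c13, c247,c522, c682,c947, and so on) are reported as admin customizations and left alone rather than reset to the policy default. A minimal Python triage sketch, assuming the one-entry-per-line layout above (the filename "kubelet.log" is a placeholder), that counts skipped paths per pod UID and per context:

import re
from collections import Counter

# Only matches entries under /var/lib/kubelet/pods (the plugins lines
# above are deliberately out of scope for this sketch).
SKIPPED = re.compile(
    r"restorecon\[\d+\]: /var/lib/kubelet/pods/([^/\s]+)\S* "
    r"not reset as customized by admin to (\S+)"
)

def summarize(path="kubelet.log"):
    per_pod, per_context = Counter(), Counter()
    with open(path) as fh:
        for line in fh:
            match = SKIPPED.search(line)
            if match:
                per_pod[match.group(1)] += 1       # pod UID
                per_context[match.group(2)] += 1   # SELinux context kept
    for pod, count in per_pod.most_common():
        print(f"{count:5d} skipped paths in pod {pod}")
    for ctx, count in per_context.most_common():
        print(f"{count:5d} entries kept context {ctx}")

if __name__ == "__main__":
    summarize()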
Jan 29 12:06:27 crc kubenswrapper[4753]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Jan 29 12:06:27 crc kubenswrapper[4753]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version.
Jan 29 12:06:27 crc kubenswrapper[4753]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Jan 29 12:06:27 crc kubenswrapper[4753]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Jan 29 12:06:27 crc kubenswrapper[4753]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI.
Jan 29 12:06:27 crc kubenswrapper[4753]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.129666 4753 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime"
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143545 4753 feature_gate.go:330] unrecognized feature gate: Example
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143587 4753 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143594 4753 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143600 4753 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143606 4753 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143613 4753 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143620 4753 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143625 4753 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143630 4753 feature_gate.go:330] unrecognized feature gate: SignatureStores
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143635 4753 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143641 4753 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143647 4753 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143654 4753 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143662 4753 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143669 4753 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143676 4753 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143682 4753 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143687 4753 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143692 4753 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143697 4753 feature_gate.go:330] unrecognized feature gate: OVNObservability
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143702 4753 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143707 4753 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143712 4753 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143717 4753 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143724 4753 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143729 4753 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143734 4753 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143739 4753 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143744 4753 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143749 4753 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143754 4753 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143760 4753 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143765 4753 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143770 4753 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143775 4753 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143780 4753 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143786 4753 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143791 4753 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143796 4753 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143801 4753 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143806 4753 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143812 4753 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143817 4753 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143822 4753 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143832 4753 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143838 4753 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143845 4753 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143851 4753 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143857 4753 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143863 4753 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143869 4753 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143875 4753 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143880 4753 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143886 4753 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143891 4753 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143896 4753 feature_gate.go:330] unrecognized feature gate: PinnedImages
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143902 4753 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143908 4753 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143913 4753 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143918 4753 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143923 4753 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143928 4753 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143933 4753 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143938 4753 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143945 4753 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143951 4753 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143957 4753 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143965 4753 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143972 4753 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143978 4753 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.143984 4753 feature_gate.go:330] unrecognized feature gate: NewOLM
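The W0129 lines above come from the kubelet's feature-gate registry: the wrapper passes the cluster's full OpenShift gate list on startup, any name the embedded Kubernetes release does not register is reported as unrecognized (feature_gate.go:330), and overrides of gates that have already graduated or been deprecated get the feature_gate.go:351/353 notices instead. An illustrative Python sketch of that parse-and-warn behavior follows; it is not the kubelet's actual code, and KNOWN_GATES is a stand-in subset, not the real registry.

import logging

KNOWN_GATES = {
    "DisableKubeletCloudCredentialProviders",
    "ValidatingAdmissionPolicy",
    "CloudDualStackNodeIPs",
    "KMSv1",
}

def apply_feature_gates(spec: str) -> dict:
    """Parse a 'Name=bool,Name=bool' gate spec, warning on unknown names."""
    enabled = {}
    for pair in filter(None, spec.split(",")):
        name, _, value = pair.partition("=")
        if name not in KNOWN_GATES:
            logging.warning("unrecognized feature gate: %s", name)
            continue
        enabled[name] = value.strip().lower() == "true"
    return enabled

print(apply_feature_gates("KMSv1=true,GatewayAPI=true"))
# warns about GatewayAPI and returns {'KMSv1': True}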
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144146 4753 flags.go:64] FLAG: --address="0.0.0.0"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144164 4753 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144175 4753 flags.go:64] FLAG: --anonymous-auth="true"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144183 4753 flags.go:64] FLAG: --application-metrics-count-limit="100"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144193 4753 flags.go:64] FLAG: --authentication-token-webhook="false"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144201 4753 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144210 4753 flags.go:64] FLAG: --authorization-mode="AlwaysAllow"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144218 4753 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144246 4753 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144260 4753 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144273 4753 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144285 4753 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144294 4753 flags.go:64] FLAG: --cgroup-driver="cgroupfs"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144301 4753 flags.go:64] FLAG: --cgroup-root=""
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144308 4753 flags.go:64] FLAG: --cgroups-per-qos="true"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144316 4753 flags.go:64] FLAG: --client-ca-file=""
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144324 4753 flags.go:64] FLAG: --cloud-config=""
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144334 4753 flags.go:64] FLAG: --cloud-provider=""
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144340 4753 flags.go:64] FLAG: --cluster-dns="[]"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144349 4753 flags.go:64] FLAG: --cluster-domain=""
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144355 4753 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144361 4753 flags.go:64] FLAG: --config-dir=""
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144367 4753 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144374 4753 flags.go:64] FLAG: --container-log-max-files="5"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144383 4753 flags.go:64] FLAG: --container-log-max-size="10Mi"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144389 4753 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144396 4753 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144402 4753 flags.go:64] FLAG: --containerd-namespace="k8s.io"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144409 4753 flags.go:64] FLAG: --contention-profiling="false"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144415 4753 flags.go:64] FLAG: --cpu-cfs-quota="true"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144421 4753 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144428 4753 flags.go:64] FLAG: --cpu-manager-policy="none"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144434 4753 flags.go:64] FLAG: --cpu-manager-policy-options=""
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144443 4753 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144449 4753 flags.go:64] FLAG: --enable-controller-attach-detach="true"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144456 4753 flags.go:64] FLAG: --enable-debugging-handlers="true"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144462 4753 flags.go:64] FLAG: --enable-load-reader="false"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144468 4753 flags.go:64] FLAG: --enable-server="true"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144475 4753 flags.go:64] FLAG: --enforce-node-allocatable="[pods]"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144483 4753 flags.go:64] FLAG: --event-burst="100"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144491 4753 flags.go:64] FLAG: --event-qps="50"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144500 4753 flags.go:64] FLAG: --event-storage-age-limit="default=0"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144517 4753 flags.go:64] FLAG: --event-storage-event-limit="default=0"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144525 4753 flags.go:64] FLAG: --eviction-hard=""
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144536 4753 flags.go:64] FLAG: --eviction-max-pod-grace-period="0"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144544 4753 flags.go:64] FLAG: --eviction-minimum-reclaim=""
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144552 4753 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144563 4753 flags.go:64] FLAG: --eviction-soft=""
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144571 4753 flags.go:64] FLAG: --eviction-soft-grace-period=""
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144600 4753 flags.go:64] FLAG: --exit-on-lock-contention="false"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144608 4753 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144615 4753 flags.go:64] FLAG: --experimental-mounter-path=""
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144622 4753 flags.go:64] FLAG: --fail-cgroupv1="false"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144629 4753 flags.go:64] FLAG: --fail-swap-on="true"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144637 4753 flags.go:64] FLAG: --feature-gates=""
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144653 4753 flags.go:64] FLAG: --file-check-frequency="20s"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144666 4753 flags.go:64] FLAG: --global-housekeeping-interval="1m0s"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144675 4753 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144683 4753 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144692 4753 flags.go:64] FLAG: --healthz-port="10248"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144700 4753 flags.go:64] FLAG: --help="false"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144708 4753 flags.go:64] FLAG: --hostname-override=""
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144716 4753 flags.go:64] FLAG: --housekeeping-interval="10s"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144724 4753 flags.go:64] FLAG: --http-check-frequency="20s"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144731 4753 flags.go:64] FLAG: --image-credential-provider-bin-dir=""
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144737 4753 flags.go:64] FLAG: --image-credential-provider-config=""
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144743 4753 flags.go:64] FLAG: --image-gc-high-threshold="85"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144749 4753 flags.go:64] FLAG: --image-gc-low-threshold="80"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144756 4753 flags.go:64] FLAG: --image-service-endpoint=""
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144762 4753 flags.go:64] FLAG: --kernel-memcg-notification="false"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144768 4753 flags.go:64] FLAG: --kube-api-burst="100"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144774 4753 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144781 4753 flags.go:64] FLAG: --kube-api-qps="50"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144787 4753 flags.go:64] FLAG: --kube-reserved=""
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144793 4753 flags.go:64] FLAG: --kube-reserved-cgroup=""
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144799 4753 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144805 4753 flags.go:64] FLAG: --kubelet-cgroups=""
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144811 4753 flags.go:64] FLAG: --local-storage-capacity-isolation="true"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144817 4753 flags.go:64] FLAG: --lock-file=""
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144823 4753 flags.go:64] FLAG: --log-cadvisor-usage="false"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144830 4753 flags.go:64] FLAG: --log-flush-frequency="5s"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144837 4753 flags.go:64] FLAG: --log-json-info-buffer-size="0"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144847 4753 flags.go:64] FLAG: --log-json-split-stream="false"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144855 4753 flags.go:64] FLAG: --log-text-info-buffer-size="0"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144861 4753 flags.go:64] FLAG: --log-text-split-stream="false"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144867 4753 flags.go:64] FLAG: --logging-format="text"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144873 4753 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144880 4753 flags.go:64] FLAG: --make-iptables-util-chains="true"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144886 4753 flags.go:64] FLAG: --manifest-url=""
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144892 4753 flags.go:64] FLAG: --manifest-url-header=""
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144901 4753 flags.go:64] FLAG: --max-housekeeping-interval="15s"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144907 4753 flags.go:64] FLAG: --max-open-files="1000000"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144914 4753 flags.go:64] FLAG: --max-pods="110"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144920 4753 flags.go:64] FLAG: --maximum-dead-containers="-1"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144928 4753 flags.go:64] FLAG: --maximum-dead-containers-per-container="1"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144934 4753 flags.go:64] FLAG: --memory-manager-policy="None"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144940 4753 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144946 4753 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144953 4753 flags.go:64] FLAG: --node-ip="192.168.126.11"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.144959 4753 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.145001 4753 flags.go:64] FLAG: --node-status-max-images="50"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.145008 4753 flags.go:64] FLAG: --node-status-update-frequency="10s"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.145014 4753 flags.go:64] FLAG: --oom-score-adj="-999"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.145020 4753 flags.go:64] FLAG: --pod-cidr=""
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.145026 4753 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.145035 4753 flags.go:64] FLAG: --pod-manifest-path=""
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.145041 4753 flags.go:64] FLAG: --pod-max-pids="-1"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.145047 4753 flags.go:64] FLAG: --pods-per-core="0"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.145054 4753 flags.go:64] FLAG: --port="10250"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.145060 4753 flags.go:64] FLAG: --protect-kernel-defaults="false"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.145066 4753 flags.go:64] FLAG: --provider-id=""
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.145072 4753 flags.go:64] FLAG: --qos-reserved=""
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.145079 4753 flags.go:64] FLAG: --read-only-port="10255"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.145085 4753 flags.go:64] FLAG: --register-node="true"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.145091 4753 flags.go:64] FLAG: --register-schedulable="true"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.145097 4753 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.145108 4753 flags.go:64] FLAG: --registry-burst="10"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.145114 4753 flags.go:64] FLAG: --registry-qps="5"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.145120 4753 flags.go:64] FLAG: --reserved-cpus=""
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.145127 4753 flags.go:64] FLAG: --reserved-memory=""
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.145135 4753 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.145141 4753 flags.go:64] FLAG: --root-dir="/var/lib/kubelet"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.145148 4753 flags.go:64] FLAG: --rotate-certificates="false"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.145154 4753 flags.go:64] FLAG: --rotate-server-certificates="false"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.145160 4753 flags.go:64] FLAG: --runonce="false"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.145166 4753 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.145172 4753 flags.go:64] FLAG: --runtime-request-timeout="2m0s"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.145179 4753 flags.go:64] FLAG: --seccomp-default="false"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.145185 4753 flags.go:64] FLAG: --serialize-image-pulls="true"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.145191 4753 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.145197 4753 flags.go:64] FLAG: --storage-driver-db="cadvisor"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.145209 4753 flags.go:64] FLAG: --storage-driver-host="localhost:8086"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.145215 4753 flags.go:64] FLAG: --storage-driver-password="root"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.145243 4753 flags.go:64] FLAG: --storage-driver-secure="false"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.145250 4753 flags.go:64] FLAG: --storage-driver-table="stats"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.145256 4753 flags.go:64] FLAG: --storage-driver-user="root"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.145262 4753 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.145269 4753 flags.go:64] FLAG: --sync-frequency="1m0s"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.145275 4753 flags.go:64] FLAG: --system-cgroups=""
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.145281 4753 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.145291 4753 flags.go:64] FLAG: --system-reserved-cgroup=""
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.145297 4753 flags.go:64] FLAG: --tls-cert-file=""
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.145303 4753 flags.go:64] FLAG: --tls-cipher-suites="[]"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.145312 4753 flags.go:64] FLAG: --tls-min-version=""
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.145318 4753 flags.go:64] FLAG: --tls-private-key-file=""
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.145324 4753 flags.go:64] FLAG: --topology-manager-policy="none"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.145330 4753 flags.go:64] FLAG: --topology-manager-policy-options=""
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.145337 4753 flags.go:64] FLAG: --topology-manager-scope="container"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.145343 4753 flags.go:64] FLAG: --v="2"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.145351 4753 flags.go:64] FLAG: --version="false"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.145362 4753 flags.go:64] FLAG: --vmodule=""
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.145371 4753 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.145379 4753 flags.go:64] FLAG: --volume-stats-agg-period="1m0s"
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145525 4753 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145532 4753 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145548 4753 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145554 4753 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145559 4753 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145565 4753 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145571 4753 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145576 4753 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145582 4753 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145587 4753 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145593 4753 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145600 4753 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145622 4753 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145633 4753 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129
12:06:27.145640 4753 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145646 4753 feature_gate.go:330] unrecognized feature gate: PinnedImages Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145651 4753 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145656 4753 feature_gate.go:330] unrecognized feature gate: NewOLM Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145662 4753 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145667 4753 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145672 4753 feature_gate.go:330] unrecognized feature gate: SignatureStores Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145678 4753 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145684 4753 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145688 4753 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145692 4753 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145697 4753 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145701 4753 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145708 4753 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145714 4753 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145719 4753 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145724 4753 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145729 4753 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145733 4753 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145760 4753 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145771 4753 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145779 4753 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145786 4753 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145791 4753 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145805 4753 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145810 4753 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145815 4753 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145821 4753 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145825 4753 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145829 4753 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145838 4753 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145842 4753 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145846 4753 feature_gate.go:330] unrecognized feature gate: PlatformOperators Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145851 4753 feature_gate.go:330] unrecognized feature gate: Example Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145856 4753 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145862 4753 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. 
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145867 4753 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145872 4753 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145877 4753 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145882 4753 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145887 4753 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145892 4753 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145896 4753 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145901 4753 feature_gate.go:330] unrecognized feature gate: InsightsConfig Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145906 4753 feature_gate.go:330] unrecognized feature gate: OVNObservability Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145910 4753 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145915 4753 feature_gate.go:330] unrecognized feature gate: GatewayAPI Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145919 4753 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145924 4753 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145928 4753 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145937 4753 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. 
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145943 4753 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145947 4753 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145952 4753 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145957 4753 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145962 4753 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.145966 4753 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.145990 4753 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.192946 4753 server.go:491] "Kubelet version" kubeletVersion="v1.31.5" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.193018 4753 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193158 4753 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193175 4753 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193189 4753 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193199 4753 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193208 4753 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193216 4753 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193257 4753 feature_gate.go:330] unrecognized feature gate: GatewayAPI Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193268 4753 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193279 4753 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193287 4753 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193295 4753 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193303 4753 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193310 4753 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193320 4753 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193328 4753 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193335 4753 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193343 4753 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193351 4753 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193359 4753 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193368 4753 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193376 4753 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193383 4753 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193391 4753 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193399 4753 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193407 4753 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193415 4753 feature_gate.go:330] unrecognized feature gate: InsightsConfig Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193422 4753 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193430 
4753 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193438 4753 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193446 4753 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193455 4753 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193464 4753 feature_gate.go:330] unrecognized feature gate: Example Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193475 4753 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193486 4753 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193498 4753 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193506 4753 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193514 4753 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193522 4753 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193529 4753 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193537 4753 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193545 4753 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193553 4753 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193560 4753 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193570 4753 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193578 4753 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193589 4753 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193599 4753 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193610 4753 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. 
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193619 4753 feature_gate.go:330] unrecognized feature gate: NewOLM Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193627 4753 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193635 4753 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193643 4753 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193651 4753 feature_gate.go:330] unrecognized feature gate: PlatformOperators Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193659 4753 feature_gate.go:330] unrecognized feature gate: SignatureStores Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193669 4753 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193679 4753 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193687 4753 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193695 4753 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193703 4753 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193711 4753 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193719 4753 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193727 4753 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193735 4753 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193743 4753 feature_gate.go:330] unrecognized feature gate: PinnedImages Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193752 4753 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193761 4753 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193769 4753 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193777 4753 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193785 4753 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193793 4753 feature_gate.go:330] unrecognized feature gate: OVNObservability Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.193802 4753 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.193817 4753 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false 
TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194152 4753 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194167 4753 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194178 4753 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194190 4753 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194200 4753 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194209 4753 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194217 4753 feature_gate.go:330] unrecognized feature gate: NewOLM Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194259 4753 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194271 4753 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194282 4753 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194293 4753 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194302 4753 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194310 4753 feature_gate.go:330] unrecognized feature gate: InsightsConfig Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194318 4753 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194327 4753 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194334 4753 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194342 4753 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194350 4753 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194357 4753 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194365 4753 feature_gate.go:330] unrecognized feature gate: PinnedImages Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194373 4753 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194381 4753 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194388 4753 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194396 4753 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194404 4753 
feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194412 4753 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194421 4753 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194429 4753 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194437 4753 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194447 4753 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194457 4753 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194465 4753 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194475 4753 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194485 4753 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194498 4753 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194507 4753 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194515 4753 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194523 4753 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194533 4753 feature_gate.go:330] unrecognized feature gate: OVNObservability Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194542 4753 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194553 4753 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194563 4753 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194572 4753 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194581 4753 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194591 4753 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194601 4753 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194612 4753 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194621 4753 feature_gate.go:330] unrecognized feature gate: Example Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194629 4753 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194637 4753 
feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194646 4753 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194653 4753 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194663 4753 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194673 4753 feature_gate.go:330] unrecognized feature gate: GatewayAPI Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194682 4753 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194691 4753 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194699 4753 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194708 4753 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194717 4753 feature_gate.go:330] unrecognized feature gate: PlatformOperators Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194725 4753 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194734 4753 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194743 4753 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194751 4753 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194759 4753 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194767 4753 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194774 4753 feature_gate.go:330] unrecognized feature gate: SignatureStores Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194782 4753 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194790 4753 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194797 4753 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194805 4753 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.194814 4753 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.194827 4753 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false 
ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.195138 4753 server.go:940] "Client rotation is on, will bootstrap in background" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.236589 4753 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.236828 4753 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem". Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.238867 4753 server.go:997] "Starting client certificate rotation" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.238904 4753 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.239094 4753 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-12-20 07:45:11.55035867 +0000 UTC Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.239160 4753 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.270905 4753 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.274384 4753 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Jan 29 12:06:27 crc kubenswrapper[4753]: E0129 12:06:27.275724 4753 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.129.56.211:6443: connect: connection refused" logger="UnhandledError" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.296110 4753 log.go:25] "Validated CRI v1 runtime API" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.394953 4753 log.go:25] "Validated CRI v1 image API" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.397480 4753 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.426941 4753 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2026-01-29-12-01-59-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3] Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.427053 4753 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:43 fsType:tmpfs blockSize:0}] Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.476493 4753 manager.go:217] Machine: {Timestamp:2026-01-29 12:06:27.448340963 +0000 UTC m=+1.700422418 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 
CpuFrequency:2800000 MemoryCapacity:33654124544 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:ce89c1ce-259c-4b78-b348-3ef96afb6944 BootID:7536a030-b66d-4df9-a1ec-9890f7ad99e7 Filesystems:[{Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827064320 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:3365408768 Type:vfs Inodes:821633 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:43 Capacity:1073741824 Type:vfs Inodes:4108169 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827060224 Type:vfs Inodes:4108169 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:ad:17:c3 Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:ad:17:c3 Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:ac:80:39 Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:90:3f:42 Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:50:11:cb Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:c8:a2:1e Speed:-1 Mtu:1496} {Name:eth10 MacAddress:46:f2:df:d1:01:91 Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:16:2e:11:61:c3:84 Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654124544 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] 
SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None} Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.477011 4753 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available. Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.477364 4753 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:} Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.478246 4753 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.478568 4753 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[] Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.478647 4753 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" 
nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2} Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.479076 4753 topology_manager.go:138] "Creating topology manager with none policy" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.479092 4753 container_manager_linux.go:303] "Creating device plugin manager" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.479494 4753 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.479551 4753 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.479854 4753 state_mem.go:36] "Initialized new in-memory state store" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.479973 4753 server.go:1245] "Using root directory" path="/var/lib/kubelet" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.494069 4753 kubelet.go:418] "Attempting to sync node with API server" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.494157 4753 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.494329 4753 file.go:69] "Watching path" path="/etc/kubernetes/manifests" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.494361 4753 kubelet.go:324] "Adding apiserver pod source" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.494412 4753 apiserver.go:42] "Waiting for node sync before watching apiserver pods" Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.514861 4753 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.129.56.211:6443: connect: connection refused Jan 29 12:06:27 crc kubenswrapper[4753]: E0129 12:06:27.515072 
4753 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.129.56.211:6443: connect: connection refused" logger="UnhandledError" Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.515219 4753 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.129.56.211:6443: connect: connection refused Jan 29 12:06:27 crc kubenswrapper[4753]: E0129 12:06:27.515356 4753 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.129.56.211:6443: connect: connection refused" logger="UnhandledError" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.520520 4753 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.527928 4753 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem". Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.530610 4753 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.542436 4753 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.542483 4753 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.542498 4753 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.542510 4753 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.542527 4753 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.542539 4753 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.542553 4753 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.542603 4753 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.542622 4753 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.542659 4753 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.542693 4753 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.542710 4753 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.549620 4753 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.551091 
4753 server.go:1280] "Started kubelet" Jan 29 12:06:27 crc systemd[1]: Started Kubernetes Kubelet. Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.551072 4753 server.go:163] "Starting to listen" address="0.0.0.0" port=10250 Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.555679 4753 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10 Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.558240 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.558421 4753 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.728154 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-21 13:59:31.343891088 +0000 UTC Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.728291 4753 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.728540 4753 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.129.56.211:6443: connect: connection refused Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.728727 4753 volume_manager.go:287] "The desired_state_of_world populator starts" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.728776 4753 volume_manager.go:289] "Starting Kubelet Volume Manager" Jan 29 12:06:27 crc kubenswrapper[4753]: E0129 12:06:27.728704 4753 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.728816 4753 desired_state_of_world_populator.go:146] "Desired state populator starts to run" Jan 29 12:06:27 crc kubenswrapper[4753]: E0129 12:06:27.729402 4753 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.211:6443: connect: connection refused" interval="200ms" Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.729523 4753 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.129.56.211:6443: connect: connection refused Jan 29 12:06:27 crc kubenswrapper[4753]: E0129 12:06:27.729713 4753 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.129.56.211:6443: connect: connection refused" logger="UnhandledError" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.730596 4753 factory.go:55] Registering systemd factory Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.730651 4753 factory.go:221] Registration of the systemd container factory successfully Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.731187 4753 factory.go:153] Registering CRI-O factory Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.731212 4753 factory.go:221] Registration of the crio container factory successfully 
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.731612 4753 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.731705 4753 factory.go:103] Registering Raw factory Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.731771 4753 manager.go:1196] Started watching for new ooms in manager Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.732764 4753 manager.go:319] Starting recovery of all containers Jan 29 12:06:27 crc kubenswrapper[4753]: E0129 12:06:27.731299 4753 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.129.56.211:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.188f323657180095 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-29 12:06:27.550707861 +0000 UTC m=+1.802789326,LastTimestamp:2026-01-29 12:06:27.550707861 +0000 UTC m=+1.802789326,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.749646 4753 server.go:460] "Adding debug handlers to kubelet server" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.752652 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.752706 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.752719 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.752730 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.752739 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.752746 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext="" Jan 29 
12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.752767 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.752775 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.752793 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.752802 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.752810 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.752819 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.752830 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.752843 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.752859 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.752868 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.752878 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.752890 4753 
reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.752900 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.752913 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.752922 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.752935 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.752951 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.752963 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.752975 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.752989 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753005 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753020 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753032 4753 reconstruct.go:130] 
"Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753043 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753062 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753072 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753083 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753093 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753103 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753114 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753123 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753133 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753147 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753159 4753 reconstruct.go:130] "Volume is marked as uncertain 
and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753168 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753179 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753188 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753197 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753207 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753217 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753247 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753257 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753270 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753280 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753289 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753298 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753316 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753327 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753339 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753348 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753358 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753370 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753391 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753413 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753431 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753443 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" 
volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753462 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753476 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753485 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753501 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753517 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753536 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753548 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753708 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753768 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753781 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753794 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753809 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753821 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753834 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753846 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753866 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753885 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753898 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753911 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753925 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753936 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753947 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753959 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753972 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.753989 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.754003 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.754016 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.754028 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.754039 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.754059 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.754072 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.754086 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.754104 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.754115 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.754127 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.754139 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.754152 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.754180 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.754195 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.754207 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.754240 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.754252 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.754307 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.754322 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.754342 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.754355 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.754383 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.754397 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.754431 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.754451 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.754466 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.754487 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.754500 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.754513 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.754526 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" 
volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.754639 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.754658 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.754672 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.754683 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.754694 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.754707 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.754725 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.754737 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.754748 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.754767 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.755292 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" 
volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.755305 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.755314 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.755323 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.755332 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.755341 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.755355 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.755381 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.755396 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.755408 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.755425 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.755436 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" 
volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.755444 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.755457 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.755478 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.755505 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.755515 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.755524 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.755532 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.755547 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.755557 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.755566 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.755576 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" 
volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.755585 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.755594 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.755602 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.755611 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.755620 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.755629 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.755638 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.755649 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.755717 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.755730 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.755740 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" 
volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.755750 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.755758 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.755770 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.755780 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.755789 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.755803 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.755813 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.755822 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.755832 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.755840 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.755849 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" 
volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.755862 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.755872 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.755887 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.755902 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.755936 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.755945 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.755954 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.755968 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.755978 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.755987 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.757706 4753 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" 
volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.758055 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.758068 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.758079 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.758090 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.758100 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.758112 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.758123 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.758141 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.758152 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.758171 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext="" Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.758183 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext=""
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.758202 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext=""
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.758213 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext=""
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.758240 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext=""
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.758252 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext=""
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.758264 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext=""
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.758275 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext=""
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.758320 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext=""
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.758331 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext=""
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.758347 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext=""
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.758357 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext=""
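
Each reconstruct.go:130 entry in this run records one volume (configmap, secret, projected, empty-dir, or csi) being restored into the kubelet's actual state of the world after restart, before the API server is reachable. When triaging a log like this, tallying the entries per pod UID shows which pods carry the most reconstructed volumes; a rough scanner over one-entry-per-line input, sketched as a hypothetical helper rather than kubelet code:

package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
)

func main() {
	// Capture the pod UID from each volume-reconstruction entry.
	re := regexp.MustCompile(`reconstruct\.go:130\].*podName="([0-9a-f-]+)"`)
	counts := map[string]int{}
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024) // some kubelet log lines are long
	for sc.Scan() {
		if m := re.FindStringSubmatch(sc.Text()); m != nil {
			counts[m[1]]++
		}
	}
	for uid, n := range counts {
		fmt.Printf("%6d %s\n", n, uid)
	}
}
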
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.758377 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext=""
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.758391 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext=""
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.758400 4753 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext=""
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.758414 4753 reconstruct.go:97] "Volume reconstruction finished"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.758424 4753 reconciler.go:26] "Reconciler: start to sync state"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.765062 4753 manager.go:324] Recovery completed
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.776152 4753 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.778576 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.778646 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.778670 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.780677 4753 cpu_manager.go:225] "Starting CPU manager" policy="none"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.780721 4753 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.780785 4753 state_mem.go:36] "Initialized new in-memory state store"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.815069 4753 policy_none.go:49] "None policy: Start"
Jan 29 12:06:27 crc kubenswrapper[4753]: E0129 12:06:27.829010 4753 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.864600 4753 memory_manager.go:170] "Starting memorymanager" policy="None"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.864672 4753 state_mem.go:35] "Initializing new in-memory state store"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.884446 4753 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.887037 4753 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv6"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.887125 4753 status_manager.go:217] "Starting to sync pod status with apiserver"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.887167 4753 kubelet.go:2335] "Starting kubelet main sync loop"
Jan 29 12:06:27 crc kubenswrapper[4753]: E0129 12:06:27.887385 4753 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]"
Jan 29 12:06:27 crc kubenswrapper[4753]: W0129 12:06:27.888507 4753 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.129.56.211:6443: connect: connection refused
Jan 29 12:06:27 crc kubenswrapper[4753]: E0129 12:06:27.888698 4753 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.129.56.211:6443: connect: connection refused" logger="UnhandledError"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.929082 4753 manager.go:334] "Starting Device Plugin manager"
Jan 29 12:06:27 crc kubenswrapper[4753]: E0129 12:06:27.929292 4753 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.929402 4753 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.929437 4753 server.go:79] "Starting device plugin registration server"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.930073 4753 eviction_manager.go:189] "Eviction manager: starting control loop"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.930112 4753 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.930331 4753 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.930462 4753 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.930473 4753 plugin_manager.go:118] "Starting Kubelet Plugin Manager"
Jan 29 12:06:27 crc kubenswrapper[4753]: E0129 12:06:27.930623 4753 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.211:6443: connect: connection refused" interval="400ms"
Jan 29 12:06:27 crc kubenswrapper[4753]: E0129 12:06:27.938295 4753 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.988470 4753 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc"]
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.988662 4753 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.993450 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.993512 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.993521 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.993801 4753 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.994160 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.994263 4753 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.997477 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.997534 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.997482 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.997548 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.997566 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:06:27 crc kubenswrapper[4753]: I0129 12:06:27.997580 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:27.997902 4753 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:27.998196 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc"
Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:27.998345 4753 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:27.999135 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:27.999178 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:27.999193 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:27.999418 4753 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:27.999608 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:27.999669 4753 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.000300 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.000340 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.000352 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.000711 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.000772 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.000788 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.000852 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.000868 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.000876 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.001037 4753 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.001175 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.001210 4753 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.002140 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.002157 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.002166 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.002299 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.002321 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.002359 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.002551 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.002575 4753 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.003354 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.003377 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.003386 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.030526 4753 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.031853 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.031904 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.031918 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.031952 4753 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Jan 29 12:06:28 crc kubenswrapper[4753]: E0129 12:06:28.032687 4753 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.129.56.211:6443: connect: connection refused" node="crc"
Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.061917 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.061973 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.062006 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.062110 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.062191 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
\"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.062255 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.062287 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.062310 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.062337 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.062378 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.062462 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.062545 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.062590 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.062645 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod 
\"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.062685 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.164053 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.164116 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.164136 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.164155 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.164173 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.164194 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.164212 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.164239 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.164260 4753 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.164275 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.164291 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.164306 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.164321 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.164334 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.164355 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.164380 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.164406 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.164379 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " 
pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.164464 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.164428 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.164390 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.164473 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.164509 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.164544 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.164936 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.164514 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.165004 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.165055 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: 
\"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.164981 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.164506 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.233805 4753 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.235760 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.235818 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.235831 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.235867 4753 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 29 12:06:28 crc kubenswrapper[4753]: E0129 12:06:28.236590 4753 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.129.56.211:6443: connect: connection refused" node="crc" Jan 29 12:06:28 crc kubenswrapper[4753]: E0129 12:06:28.331675 4753 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.211:6443: connect: connection refused" interval="800ms" Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.364916 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.392508 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.398478 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.416390 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.424789 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 29 12:06:28 crc kubenswrapper[4753]: W0129 12:06:28.430653 4753 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-8f1df87b29a288f943726089ab0ad87f171a5223dda301b16b413547d68877ce WatchSource:0}: Error finding container 8f1df87b29a288f943726089ab0ad87f171a5223dda301b16b413547d68877ce: Status 404 returned error can't find the container with id 8f1df87b29a288f943726089ab0ad87f171a5223dda301b16b413547d68877ce Jan 29 12:06:28 crc kubenswrapper[4753]: W0129 12:06:28.438769 4753 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-e5286309dd4849ec5087c4a05973fc05b8f0cbf2a28f82ba77694e3f5a588bc9 WatchSource:0}: Error finding container e5286309dd4849ec5087c4a05973fc05b8f0cbf2a28f82ba77694e3f5a588bc9: Status 404 returned error can't find the container with id e5286309dd4849ec5087c4a05973fc05b8f0cbf2a28f82ba77694e3f5a588bc9 Jan 29 12:06:28 crc kubenswrapper[4753]: W0129 12:06:28.446093 4753 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-5252a0ba4c1d1210bf9a1ea5b5b83408422b6423c1fe757d92e6e61dba24e59b WatchSource:0}: Error finding container 5252a0ba4c1d1210bf9a1ea5b5b83408422b6423c1fe757d92e6e61dba24e59b: Status 404 returned error can't find the container with id 5252a0ba4c1d1210bf9a1ea5b5b83408422b6423c1fe757d92e6e61dba24e59b Jan 29 12:06:28 crc kubenswrapper[4753]: W0129 12:06:28.448002 4753 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-8fd28b40a6b44473c94c762e7d32105c67a0b719de088ebb27ca261cf8dc2354 WatchSource:0}: Error finding container 8fd28b40a6b44473c94c762e7d32105c67a0b719de088ebb27ca261cf8dc2354: Status 404 returned error can't find the container with id 8fd28b40a6b44473c94c762e7d32105c67a0b719de088ebb27ca261cf8dc2354 Jan 29 12:06:28 crc kubenswrapper[4753]: W0129 12:06:28.540629 4753 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.129.56.211:6443: connect: connection refused Jan 29 12:06:28 crc kubenswrapper[4753]: E0129 12:06:28.541205 4753 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.129.56.211:6443: connect: connection refused" logger="UnhandledError" Jan 29 12:06:28 crc kubenswrapper[4753]: W0129 12:06:28.571854 4753 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.129.56.211:6443: connect: connection refused Jan 29 12:06:28 crc kubenswrapper[4753]: E0129 12:06:28.572011 4753 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get 
\"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.129.56.211:6443: connect: connection refused" logger="UnhandledError" Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.636863 4753 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.638536 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.638598 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.638616 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.638665 4753 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 29 12:06:28 crc kubenswrapper[4753]: E0129 12:06:28.639587 4753 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.129.56.211:6443: connect: connection refused" node="crc" Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.729116 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-27 06:24:15.355568906 +0000 UTC Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.729886 4753 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.129.56.211:6443: connect: connection refused Jan 29 12:06:28 crc kubenswrapper[4753]: W0129 12:06:28.862732 4753 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.129.56.211:6443: connect: connection refused Jan 29 12:06:28 crc kubenswrapper[4753]: E0129 12:06:28.862820 4753 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.129.56.211:6443: connect: connection refused" logger="UnhandledError" Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.892991 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"8fd28b40a6b44473c94c762e7d32105c67a0b719de088ebb27ca261cf8dc2354"} Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.894119 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"5252a0ba4c1d1210bf9a1ea5b5b83408422b6423c1fe757d92e6e61dba24e59b"} Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.895103 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" 
event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"421e29653f1a555733131836b20ef135060e45fcea6086840d384bf5bd5288b4"} Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.896913 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"e5286309dd4849ec5087c4a05973fc05b8f0cbf2a28f82ba77694e3f5a588bc9"} Jan 29 12:06:28 crc kubenswrapper[4753]: I0129 12:06:28.897950 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"8f1df87b29a288f943726089ab0ad87f171a5223dda301b16b413547d68877ce"} Jan 29 12:06:29 crc kubenswrapper[4753]: E0129 12:06:29.337139 4753 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.211:6443: connect: connection refused" interval="1.6s" Jan 29 12:06:29 crc kubenswrapper[4753]: W0129 12:06:29.341401 4753 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.129.56.211:6443: connect: connection refused Jan 29 12:06:29 crc kubenswrapper[4753]: E0129 12:06:29.341525 4753 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.129.56.211:6443: connect: connection refused" logger="UnhandledError" Jan 29 12:06:29 crc kubenswrapper[4753]: I0129 12:06:29.368160 4753 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Jan 29 12:06:29 crc kubenswrapper[4753]: E0129 12:06:29.370502 4753 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.129.56.211:6443: connect: connection refused" logger="UnhandledError" Jan 29 12:06:29 crc kubenswrapper[4753]: I0129 12:06:29.439682 4753 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 12:06:29 crc kubenswrapper[4753]: I0129 12:06:29.441182 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:06:29 crc kubenswrapper[4753]: I0129 12:06:29.441215 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:06:29 crc kubenswrapper[4753]: I0129 12:06:29.441240 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:06:29 crc kubenswrapper[4753]: I0129 12:06:29.441268 4753 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 29 12:06:29 crc kubenswrapper[4753]: E0129 12:06:29.441704 4753 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.129.56.211:6443: connect: connection refused" node="crc" Jan 29 12:06:29 
crc kubenswrapper[4753]: I0129 12:06:29.729552 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-16 03:34:24.958825662 +0000 UTC Jan 29 12:06:29 crc kubenswrapper[4753]: I0129 12:06:29.729709 4753 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.129.56.211:6443: connect: connection refused Jan 29 12:06:29 crc kubenswrapper[4753]: I0129 12:06:29.906714 4753 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="44c03bba5a02d879c3b235cd65897fb30651871689c8f02b9526966817f79636" exitCode=0 Jan 29 12:06:29 crc kubenswrapper[4753]: I0129 12:06:29.906812 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"44c03bba5a02d879c3b235cd65897fb30651871689c8f02b9526966817f79636"} Jan 29 12:06:29 crc kubenswrapper[4753]: I0129 12:06:29.906964 4753 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 12:06:29 crc kubenswrapper[4753]: I0129 12:06:29.908442 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:06:29 crc kubenswrapper[4753]: I0129 12:06:29.908500 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:06:29 crc kubenswrapper[4753]: I0129 12:06:29.908521 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:06:29 crc kubenswrapper[4753]: I0129 12:06:29.909272 4753 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1" exitCode=0 Jan 29 12:06:29 crc kubenswrapper[4753]: I0129 12:06:29.909341 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1"} Jan 29 12:06:29 crc kubenswrapper[4753]: I0129 12:06:29.909608 4753 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 12:06:29 crc kubenswrapper[4753]: I0129 12:06:29.957084 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:06:29 crc kubenswrapper[4753]: I0129 12:06:29.957177 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:06:29 crc kubenswrapper[4753]: I0129 12:06:29.957210 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:06:29 crc kubenswrapper[4753]: I0129 12:06:29.959563 4753 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="9751e4bf71bb585d41ebc678e9508fa63e947ccc42a3cfa048d482ecbb34089e" exitCode=0 Jan 29 12:06:29 crc kubenswrapper[4753]: I0129 12:06:29.959683 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" 
event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"9751e4bf71bb585d41ebc678e9508fa63e947ccc42a3cfa048d482ecbb34089e"} Jan 29 12:06:29 crc kubenswrapper[4753]: I0129 12:06:29.959840 4753 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 12:06:29 crc kubenswrapper[4753]: I0129 12:06:29.960178 4753 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 12:06:30 crc kubenswrapper[4753]: I0129 12:06:30.543924 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:06:30 crc kubenswrapper[4753]: I0129 12:06:30.544144 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:06:30 crc kubenswrapper[4753]: I0129 12:06:30.544160 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:06:30 crc kubenswrapper[4753]: I0129 12:06:30.546073 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:06:30 crc kubenswrapper[4753]: I0129 12:06:30.546135 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:06:30 crc kubenswrapper[4753]: I0129 12:06:30.546156 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:06:30 crc kubenswrapper[4753]: I0129 12:06:30.548995 4753 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="fa000ea086031e13fdded4648ef18460b2192b270143a3866a3f448d5a006eb5" exitCode=0 Jan 29 12:06:30 crc kubenswrapper[4753]: I0129 12:06:30.549162 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"fa000ea086031e13fdded4648ef18460b2192b270143a3866a3f448d5a006eb5"} Jan 29 12:06:30 crc kubenswrapper[4753]: I0129 12:06:30.549197 4753 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 12:06:30 crc kubenswrapper[4753]: I0129 12:06:30.550348 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:06:30 crc kubenswrapper[4753]: I0129 12:06:30.550381 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:06:30 crc kubenswrapper[4753]: I0129 12:06:30.550391 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:06:30 crc kubenswrapper[4753]: I0129 12:06:30.551849 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"9842d9d6fed8c87bc20835964d3271b0be3117fedc658e6ef028db422d1ad478"} Jan 29 12:06:30 crc kubenswrapper[4753]: I0129 12:06:30.551897 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"b4fd4169036d3a646c9e5e4bfa8d86a92c557c3d8f77608ae5308dd0b8c45517"} Jan 29 12:06:30 crc kubenswrapper[4753]: I0129 12:06:30.766787 4753 certificate_manager.go:356] 
kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-08 00:11:14.796124971 +0000 UTC Jan 29 12:06:30 crc kubenswrapper[4753]: I0129 12:06:30.766905 4753 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.129.56.211:6443: connect: connection refused Jan 29 12:06:30 crc kubenswrapper[4753]: W0129 12:06:30.916294 4753 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.129.56.211:6443: connect: connection refused Jan 29 12:06:30 crc kubenswrapper[4753]: E0129 12:06:30.916492 4753 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.129.56.211:6443: connect: connection refused" logger="UnhandledError" Jan 29 12:06:30 crc kubenswrapper[4753]: E0129 12:06:30.938934 4753 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.211:6443: connect: connection refused" interval="3.2s" Jan 29 12:06:31 crc kubenswrapper[4753]: W0129 12:06:30.999945 4753 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.129.56.211:6443: connect: connection refused Jan 29 12:06:31 crc kubenswrapper[4753]: E0129 12:06:31.000036 4753 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.129.56.211:6443: connect: connection refused" logger="UnhandledError" Jan 29 12:06:31 crc kubenswrapper[4753]: I0129 12:06:31.042261 4753 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 12:06:31 crc kubenswrapper[4753]: I0129 12:06:31.043986 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:06:31 crc kubenswrapper[4753]: I0129 12:06:31.044028 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:06:31 crc kubenswrapper[4753]: I0129 12:06:31.044041 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:06:31 crc kubenswrapper[4753]: I0129 12:06:31.044075 4753 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 29 12:06:31 crc kubenswrapper[4753]: E0129 12:06:31.044655 4753 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.129.56.211:6443: connect: connection refused" node="crc" Jan 29 12:06:31 crc kubenswrapper[4753]: W0129 12:06:31.342216 4753 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get 
"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.129.56.211:6443: connect: connection refused Jan 29 12:06:31 crc kubenswrapper[4753]: E0129 12:06:31.342660 4753 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.129.56.211:6443: connect: connection refused" logger="UnhandledError" Jan 29 12:06:31 crc kubenswrapper[4753]: I0129 12:06:31.560408 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"31bd7db7bddd3e16cb455f7af9fd3a9bca49919c8f1ffb676dec83c543ba7763"} Jan 29 12:06:31 crc kubenswrapper[4753]: I0129 12:06:31.560524 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"1b4a6bbb431e321e54013c96e3cca1537283cbdf65d89a978780550d603663fd"} Jan 29 12:06:31 crc kubenswrapper[4753]: I0129 12:06:31.560685 4753 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 12:06:31 crc kubenswrapper[4753]: I0129 12:06:31.562154 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:06:31 crc kubenswrapper[4753]: I0129 12:06:31.562185 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:06:31 crc kubenswrapper[4753]: I0129 12:06:31.562193 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:06:31 crc kubenswrapper[4753]: I0129 12:06:31.564677 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"0a9ec3e416b3b385c71b2fa28c0215fcb04b708e33fec2d85fdf0a75b848b027"} Jan 29 12:06:31 crc kubenswrapper[4753]: I0129 12:06:31.567838 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"cab3ab80a8fddbc591cd07fdbed800f525bb8a1b5505bf818ebb59cbcce73003"} Jan 29 12:06:31 crc kubenswrapper[4753]: I0129 12:06:31.571418 4753 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="87b7005002ba34f8e1f10beb384731bb714bcd8d43a0732e0d08999d9b43ea5b" exitCode=0 Jan 29 12:06:31 crc kubenswrapper[4753]: I0129 12:06:31.571539 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"87b7005002ba34f8e1f10beb384731bb714bcd8d43a0732e0d08999d9b43ea5b"} Jan 29 12:06:31 crc kubenswrapper[4753]: I0129 12:06:31.571686 4753 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 12:06:31 crc kubenswrapper[4753]: I0129 12:06:31.573046 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:06:31 crc kubenswrapper[4753]: I0129 12:06:31.573094 4753 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:06:31 crc kubenswrapper[4753]: I0129 12:06:31.573107 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:06:31 crc kubenswrapper[4753]: I0129 12:06:31.578650 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"308d58d7f5a84b4d1980716b231abfe2ea2e9355bff9f5c998584d6986a9a389"} Jan 29 12:06:31 crc kubenswrapper[4753]: I0129 12:06:31.578741 4753 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 12:06:31 crc kubenswrapper[4753]: I0129 12:06:31.581041 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:06:31 crc kubenswrapper[4753]: I0129 12:06:31.581112 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:06:31 crc kubenswrapper[4753]: I0129 12:06:31.581135 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:06:31 crc kubenswrapper[4753]: I0129 12:06:31.730051 4753 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.129.56.211:6443: connect: connection refused Jan 29 12:06:31 crc kubenswrapper[4753]: I0129 12:06:31.767510 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-09 08:13:53.708500066 +0000 UTC Jan 29 12:06:32 crc kubenswrapper[4753]: E0129 12:06:32.427539 4753 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.129.56.211:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.188f323657180095 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-29 12:06:27.550707861 +0000 UTC m=+1.802789326,LastTimestamp:2026-01-29 12:06:27.550707861 +0000 UTC m=+1.802789326,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 29 12:06:32 crc kubenswrapper[4753]: W0129 12:06:32.551842 4753 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.129.56.211:6443: connect: connection refused Jan 29 12:06:32 crc kubenswrapper[4753]: E0129 12:06:32.551968 4753 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.129.56.211:6443: connect: connection refused" logger="UnhandledError" Jan 29 12:06:32 crc kubenswrapper[4753]: I0129 12:06:32.592762 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
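All of the failures above reduce to one symptom: TCP connections to the internal apiserver endpoint are refused while the control plane is still coming up. A minimal Go sketch of the underlying connectivity check (the endpoint is taken from the log lines above; the timeout and output format are illustrative, not the kubelet's own code):

package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	// The kubelet above keeps failing to reach this internal API endpoint.
	addr := "api-int.crc.testing:6443"
	conn, err := net.DialTimeout("tcp", addr, 3*time.Second)
	if err != nil {
		// Same shape as the log: "dial tcp 38.129.56.211:6443: connect: connection refused"
		fmt.Printf("dial %s failed: %v\n", addr, err)
		return
	}
	conn.Close()
	fmt.Printf("dial %s succeeded: apiserver port is accepting connections\n", addr)
}

Once this dial succeeds, the reflectors, the lease controller, the event recorder, and node registration all recover on their own, which is exactly what the later part of this log shows.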
Jan 29 12:06:32 crc kubenswrapper[4753]: I0129 12:06:32.592762 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"8dea62586ba5ac5a8ddc9e75b216a27593c8fe99516abc38c9c411a93842a372"}
Jan 29 12:06:32 crc kubenswrapper[4753]: I0129 12:06:32.592825 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"e4fd3a920ea0c98edb2c8ad37d2a1a5d1ed50de5bc8c3e2f4d34c876b2cab54f"}
Jan 29 12:06:32 crc kubenswrapper[4753]: I0129 12:06:32.592972 4753 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 29 12:06:32 crc kubenswrapper[4753]: I0129 12:06:32.594568 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:06:32 crc kubenswrapper[4753]: I0129 12:06:32.594617 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:06:32 crc kubenswrapper[4753]: I0129 12:06:32.594627 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:06:32 crc kubenswrapper[4753]: I0129 12:06:32.595690 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"7af8be0836a8ff7d08ff48d9487acdedd4b83127538d071d13b3215400c0f8de"}
Jan 29 12:06:32 crc kubenswrapper[4753]: I0129 12:06:32.595720 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"91d57a9cede640bdcec325175598b72dd00398436b730248728c7d9afd545b1e"}
Jan 29 12:06:32 crc kubenswrapper[4753]: I0129 12:06:32.598441 4753 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="fba07c0b667eb156d53d6e7d4b402493b04d0670076d1bf641fe837db5a23fac" exitCode=0
Jan 29 12:06:32 crc kubenswrapper[4753]: I0129 12:06:32.598817 4753 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 29 12:06:32 crc kubenswrapper[4753]: I0129 12:06:32.599013 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"fba07c0b667eb156d53d6e7d4b402493b04d0670076d1bf641fe837db5a23fac"}
Jan 29 12:06:32 crc kubenswrapper[4753]: I0129 12:06:32.599158 4753 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 29 12:06:32 crc kubenswrapper[4753]: I0129 12:06:32.601260 4753 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 29 12:06:32 crc kubenswrapper[4753]: I0129 12:06:32.603298 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:06:32 crc kubenswrapper[4753]: I0129 12:06:32.603348 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:06:32 crc kubenswrapper[4753]: I0129 12:06:32.603368 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:06:32 crc kubenswrapper[4753]: I0129 12:06:32.603850 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:06:32 crc kubenswrapper[4753]: I0129 12:06:32.603886 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:06:32 crc kubenswrapper[4753]: I0129 12:06:32.603898 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:06:32 crc kubenswrapper[4753]: I0129 12:06:32.605520 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:06:32 crc kubenswrapper[4753]: I0129 12:06:32.605547 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:06:32 crc kubenswrapper[4753]: I0129 12:06:32.605563 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:06:32 crc kubenswrapper[4753]: I0129 12:06:32.747455 4753 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.129.56.211:6443: connect: connection refused
Jan 29 12:06:33 crc kubenswrapper[4753]: I0129 12:06:32.777673 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-27 12:57:55.656042542 +0000 UTC
Jan 29 12:06:34 crc kubenswrapper[4753]: I0129 12:06:34.164375 4753 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates
Jan 29 12:06:34 crc kubenswrapper[4753]: I0129 12:06:34.168498 4753 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.129.56.211:6443: connect: connection refused
Jan 29 12:06:34 crc kubenswrapper[4753]: E0129 12:06:34.168502 4753 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.211:6443: connect: connection refused" interval="6.4s"
Jan 29 12:06:34 crc kubenswrapper[4753]: E0129 12:06:34.169838 4753 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.129.56.211:6443: connect: connection refused" logger="UnhandledError"
Jan 29 12:06:34 crc kubenswrapper[4753]: I0129 12:06:34.191291 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"7e02d977e34888952623f4ced5e073d7bf1ff360f8deb435dc577d2ecf146cb2"}
Jan 29 12:06:34 crc kubenswrapper[4753]: I0129 12:06:34.200349 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"7fbc354ce7f2eef632b45de4d4a7e4e9fa97eca1aae9a9d345caa3771ab7b56a"}
Jan 29 12:06:34 crc kubenswrapper[4753]: I0129 12:06:34.200748 4753 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
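Note the retry interval on the lease error: "3.2s" on the first failure, "6.4s" above, and it later settles at "7s". A sketch of a doubling-with-a-cap retry schedule whose constants are read off these log lines rather than taken from the kubelet source:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Constants read off this log: 3.2s, then 6.4s, then a 7s cap.
	interval := 3200 * time.Millisecond
	maxInterval := 7 * time.Second
	for attempt := 1; attempt <= 4; attempt++ {
		fmt.Printf("lease attempt %d failed; next retry in %v\n", attempt, interval)
		interval *= 2
		if interval > maxInterval {
			interval = maxInterval
		}
	}
}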
Jan 29 12:06:34 crc kubenswrapper[4753]: I0129 12:06:34.201071 4753 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 29 12:06:34 crc kubenswrapper[4753]: I0129 12:06:34.202908 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:06:34 crc kubenswrapper[4753]: I0129 12:06:34.202958 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:06:34 crc kubenswrapper[4753]: I0129 12:06:34.202980 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:06:34 crc kubenswrapper[4753]: I0129 12:06:34.261424 4753 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 29 12:06:34 crc kubenswrapper[4753]: I0129 12:06:34.261446 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-10 20:35:24.133347946 +0000 UTC
Jan 29 12:06:34 crc kubenswrapper[4753]: I0129 12:06:34.265714 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:06:34 crc kubenswrapper[4753]: I0129 12:06:34.265774 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:06:34 crc kubenswrapper[4753]: I0129 12:06:34.265785 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:06:34 crc kubenswrapper[4753]: I0129 12:06:34.265953 4753 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Jan 29 12:06:34 crc kubenswrapper[4753]: E0129 12:06:34.272787 4753 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.129.56.211:6443: connect: connection refused" node="crc"
Jan 29 12:06:34 crc kubenswrapper[4753]: I0129 12:06:34.816169 4753 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.129.56.211:6443: connect: connection refused
Jan 29 12:06:35 crc kubenswrapper[4753]: W0129 12:06:35.079861 4753 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.129.56.211:6443: connect: connection refused
Jan 29 12:06:35 crc kubenswrapper[4753]: E0129 12:06:35.080158 4753 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.129.56.211:6443: connect: connection refused" logger="UnhandledError"
Jan 29 12:06:35 crc kubenswrapper[4753]: I0129 12:06:35.217115 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"49621dd18e83307eaf20b70845e2c308399fbdf84166ab05eec32d1a08f5ae6e"}
Jan 29 12:06:35 crc kubenswrapper[4753]: I0129 12:06:35.219789 4753 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 29 12:06:35 crc kubenswrapper[4753]: I0129 12:06:35.255451 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:06:35 crc kubenswrapper[4753]: I0129 12:06:35.255621 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:06:35 crc kubenswrapper[4753]: I0129 12:06:35.255649 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:06:35 crc kubenswrapper[4753]: I0129 12:06:35.265122 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-17 09:46:58.540559623 +0000 UTC
Jan 29 12:06:35 crc kubenswrapper[4753]: I0129 12:06:35.280539 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"da4b59a27b3f9034dedd3c9d1b99d18494ce79b8de9859b31b6cec7b5ceafa34"}
Jan 29 12:06:35 crc kubenswrapper[4753]: W0129 12:06:35.286640 4753 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.129.56.211:6443: connect: connection refused
Jan 29 12:06:35 crc kubenswrapper[4753]: E0129 12:06:35.287521 4753 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.129.56.211:6443: connect: connection refused" logger="UnhandledError"
Jan 29 12:06:35 crc kubenswrapper[4753]: I0129 12:06:35.486417 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Jan 29 12:06:35 crc kubenswrapper[4753]: I0129 12:06:35.486753 4753 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 29 12:06:35 crc kubenswrapper[4753]: I0129 12:06:35.511328 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:06:35 crc kubenswrapper[4753]: I0129 12:06:35.511381 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:06:35 crc kubenswrapper[4753]: I0129 12:06:35.511409 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:06:35 crc kubenswrapper[4753]: I0129 12:06:35.544627 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 29 12:06:35 crc kubenswrapper[4753]: I0129 12:06:35.544932 4753 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="Get \"https://192.168.126.11:6443/livez\": dial tcp 192.168.126.11:6443: connect: connection refused" start-of-body=
Jan 29 12:06:35 crc kubenswrapper[4753]: I0129 12:06:35.545180 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="Get \"https://192.168.126.11:6443/livez\": dial tcp 192.168.126.11:6443: connect: connection refused"
Jan 29 12:06:35 crc kubenswrapper[4753]: I0129 12:06:35.729265 4753 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.129.56.211:6443: connect: connection refused
Jan 29 12:06:35 crc kubenswrapper[4753]: I0129 12:06:35.773984 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 29 12:06:35 crc kubenswrapper[4753]: I0129 12:06:35.774787 4753 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 29 12:06:35 crc kubenswrapper[4753]: I0129 12:06:35.776810 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:06:35 crc kubenswrapper[4753]: I0129 12:06:35.776840 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:06:35 crc kubenswrapper[4753]: I0129 12:06:35.776851 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:06:36 crc kubenswrapper[4753]: I0129 12:06:36.265942 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-13 10:57:17.897202243 +0000 UTC
Jan 29 12:06:36 crc kubenswrapper[4753]: I0129 12:06:36.355556 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"87225c3b56866e248306db44dc5f4109959d92b1f30e10123444abd4972eaf14"}
Jan 29 12:06:36 crc kubenswrapper[4753]: I0129 12:06:36.355604 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"ffa961c755854fc2bf3bfe04f2a6ab08e38facde5e1e6cdecb469c9fa50d3df2"}
Jan 29 12:06:36 crc kubenswrapper[4753]: I0129 12:06:36.355710 4753 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 29 12:06:36 crc kubenswrapper[4753]: I0129 12:06:36.355762 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 29 12:06:36 crc kubenswrapper[4753]: I0129 12:06:36.356546 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:06:36 crc kubenswrapper[4753]: I0129 12:06:36.356574 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:06:36 crc kubenswrapper[4753]: I0129 12:06:36.356582 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:06:36 crc kubenswrapper[4753]: I0129 12:06:36.730181 4753 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.129.56.211:6443: connect: connection refused
Jan 29 12:06:37 crc kubenswrapper[4753]: I0129 12:06:37.266952 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-08 23:10:49.775494955 +0000 UTC
Jan 29 12:06:37 crc kubenswrapper[4753]: I0129 12:06:37.359517 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log"
Jan 29 12:06:37 crc kubenswrapper[4753]: I0129 12:06:37.362210 4753 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="49621dd18e83307eaf20b70845e2c308399fbdf84166ab05eec32d1a08f5ae6e" exitCode=255
Jan 29 12:06:37 crc kubenswrapper[4753]: I0129 12:06:37.362252 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"49621dd18e83307eaf20b70845e2c308399fbdf84166ab05eec32d1a08f5ae6e"}
Jan 29 12:06:37 crc kubenswrapper[4753]: I0129 12:06:37.362503 4753 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 29 12:06:37 crc kubenswrapper[4753]: I0129 12:06:37.363796 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:06:37 crc kubenswrapper[4753]: I0129 12:06:37.363851 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:06:37 crc kubenswrapper[4753]: I0129 12:06:37.363868 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:06:37 crc kubenswrapper[4753]: I0129 12:06:37.365599 4753 scope.go:117] "RemoveContainer" containerID="49621dd18e83307eaf20b70845e2c308399fbdf84166ab05eec32d1a08f5ae6e"
Jan 29 12:06:37 crc kubenswrapper[4753]: I0129 12:06:37.371074 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"d8978db8b29ad33a2d627a52f5dc9af845e573644010c152666ff442c817687a"}
Jan 29 12:06:37 crc kubenswrapper[4753]: I0129 12:06:37.371271 4753 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 29 12:06:37 crc kubenswrapper[4753]: I0129 12:06:37.372084 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:06:37 crc kubenswrapper[4753]: I0129 12:06:37.372111 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:06:37 crc kubenswrapper[4753]: I0129 12:06:37.372119 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:06:37 crc kubenswrapper[4753]: E0129 12:06:37.938562 4753 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Jan 29 12:06:38 crc kubenswrapper[4753]: I0129 12:06:38.237928 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 29 12:06:38 crc kubenswrapper[4753]: I0129 12:06:38.268413 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-06 00:41:34.815732784 +0000 UTC
Jan 29 12:06:38 crc kubenswrapper[4753]: I0129 12:06:38.375350 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log"
Jan 29 12:06:38 crc kubenswrapper[4753]: I0129 12:06:38.377833 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a"}
Jan 29 12:06:38 crc kubenswrapper[4753]: I0129 12:06:38.377916 4753 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 29 12:06:38 crc kubenswrapper[4753]: I0129 12:06:38.377972 4753 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 29 12:06:38 crc kubenswrapper[4753]: I0129 12:06:38.379098 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:06:38 crc kubenswrapper[4753]: I0129 12:06:38.379136 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:06:38 crc kubenswrapper[4753]: I0129 12:06:38.379140 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:06:38 crc kubenswrapper[4753]: I0129 12:06:38.379152 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:06:38 crc kubenswrapper[4753]: I0129 12:06:38.379203 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:06:38 crc kubenswrapper[4753]: I0129 12:06:38.379165 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:06:38 crc kubenswrapper[4753]: I0129 12:06:38.470076 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 29 12:06:38 crc kubenswrapper[4753]: I0129 12:06:38.470358 4753 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 29 12:06:38 crc kubenswrapper[4753]: I0129 12:06:38.472062 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:06:38 crc kubenswrapper[4753]: I0129 12:06:38.472106 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:06:38 crc kubenswrapper[4753]: I0129 12:06:38.472119 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:06:38 crc kubenswrapper[4753]: I0129 12:06:38.774865 4753 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 29 12:06:38 crc kubenswrapper[4753]: I0129 12:06:38.774970 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 29 12:06:39 crc kubenswrapper[4753]: I0129 12:06:39.269132 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-13 14:56:38.778159278 +0000 UTC
Jan 29 12:06:39 crc kubenswrapper[4753]: I0129 12:06:39.380209 4753 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 29 12:06:39 crc kubenswrapper[4753]: I0129 12:06:39.380319 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 29 12:06:39 crc kubenswrapper[4753]: I0129 12:06:39.381351 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:06:39 crc kubenswrapper[4753]: I0129 12:06:39.381392 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:06:39 crc kubenswrapper[4753]: I0129 12:06:39.381404 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:06:39 crc kubenswrapper[4753]: I0129 12:06:39.891294 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc"
Jan 29 12:06:39 crc kubenswrapper[4753]: I0129 12:06:39.891480 4753 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 29 12:06:39 crc kubenswrapper[4753]: I0129 12:06:39.892568 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:06:39 crc kubenswrapper[4753]: I0129 12:06:39.892603 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:06:39 crc kubenswrapper[4753]: I0129 12:06:39.892612 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:06:40 crc kubenswrapper[4753]: I0129 12:06:40.270405 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-10 04:37:32.259456139 +0000 UTC
Jan 29 12:06:40 crc kubenswrapper[4753]: I0129 12:06:40.382758 4753 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 29 12:06:40 crc kubenswrapper[4753]: I0129 12:06:40.383991 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:06:40 crc kubenswrapper[4753]: I0129 12:06:40.384044 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:06:40 crc kubenswrapper[4753]: I0129 12:06:40.384070 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:06:40 crc kubenswrapper[4753]: I0129 12:06:40.673356 4753 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 29 12:06:40 crc kubenswrapper[4753]: I0129 12:06:40.675472 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:06:40 crc kubenswrapper[4753]: I0129 12:06:40.675508 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:06:40 crc kubenswrapper[4753]: I0129 12:06:40.675519 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:06:40 crc kubenswrapper[4753]: I0129 12:06:40.675549 4753 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Jan 29 12:06:41 crc kubenswrapper[4753]: I0129 12:06:41.271131 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-09 06:27:00.77231538 +0000 UTC
Jan 29 12:06:41 crc kubenswrapper[4753]: I0129 12:06:41.327540 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 29 12:06:41 crc kubenswrapper[4753]: I0129 12:06:41.327909 4753 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 29 12:06:41 crc kubenswrapper[4753]: I0129 12:06:41.330024 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:06:41 crc kubenswrapper[4753]: I0129 12:06:41.330184 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:06:41 crc kubenswrapper[4753]: I0129 12:06:41.330289 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:06:41 crc kubenswrapper[4753]: I0129 12:06:41.339495 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 29 12:06:41 crc kubenswrapper[4753]: I0129 12:06:41.385831 4753 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 29 12:06:41 crc kubenswrapper[4753]: I0129 12:06:41.386835 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:06:41 crc kubenswrapper[4753]: I0129 12:06:41.386876 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:06:41 crc kubenswrapper[4753]: I0129 12:06:41.386886 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:06:41 crc kubenswrapper[4753]: I0129 12:06:41.390808 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 29 12:06:41 crc kubenswrapper[4753]: I0129 12:06:41.393832 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 29 12:06:42 crc kubenswrapper[4753]: I0129 12:06:42.036398 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc"
Jan 29 12:06:42 crc kubenswrapper[4753]: I0129 12:06:42.036711 4753 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 29 12:06:42 crc kubenswrapper[4753]: I0129 12:06:42.038341 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:06:42 crc kubenswrapper[4753]: I0129 12:06:42.038364 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:06:42 crc kubenswrapper[4753]: I0129 12:06:42.038374 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:06:42 crc kubenswrapper[4753]: I0129 12:06:42.271515 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-21 19:17:24.307715698 +0000 UTC
Jan 29 12:06:42 crc kubenswrapper[4753]: I0129 12:06:42.388508 4753 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
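The kubelet-serving rotation deadline printed by certificate_manager.go is different on every line because each evaluation redraws the deadline at random from a window late in the certificate's lifetime. A sketch of such a jittered-deadline computation; the 70-90% window is an assumption about the upstream rotation policy, and the issue time is invented since the log shows only the expiration:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

func main() {
	// Expiration as printed in the log; the issue time is invented, since the
	// log shows only the notAfter bound.
	notAfter, err := time.Parse("2006-01-02 15:04:05 -0700 MST", "2026-02-24 05:53:03 +0000 UTC")
	if err != nil {
		panic(err)
	}
	notBefore := notAfter.Add(-365 * 24 * time.Hour) // assumed one-year lifetime
	lifetime := notAfter.Sub(notBefore)
	// Deadline drawn from a window late in the lifetime; the 70-90% band is an
	// assumption about the rotation policy, not something read from this log.
	fraction := 0.7 + 0.2*rand.Float64()
	deadline := notBefore.Add(time.Duration(fraction * float64(lifetime)))
	fmt.Println("rotation deadline:", deadline)
}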
Jan 29 12:06:42 crc kubenswrapper[4753]: I0129 12:06:42.389349 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:06:42 crc kubenswrapper[4753]: I0129 12:06:42.389391 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:06:42 crc kubenswrapper[4753]: I0129 12:06:42.389403 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:06:42 crc kubenswrapper[4753]: I0129 12:06:42.598364 4753 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates
Jan 29 12:06:43 crc kubenswrapper[4753]: I0129 12:06:43.272019 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-27 22:18:46.171625992 +0000 UTC
Jan 29 12:06:43 crc kubenswrapper[4753]: I0129 12:06:43.390824 4753 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 29 12:06:43 crc kubenswrapper[4753]: I0129 12:06:43.391860 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:06:43 crc kubenswrapper[4753]: I0129 12:06:43.391909 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:06:43 crc kubenswrapper[4753]: I0129 12:06:43.391918 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:06:44 crc kubenswrapper[4753]: I0129 12:06:44.272201 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-07 21:01:41.287929121 +0000 UTC
Jan 29 12:06:45 crc kubenswrapper[4753]: I0129 12:06:45.273118 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-06 13:18:26.465662593 +0000 UTC
Jan 29 12:06:46 crc kubenswrapper[4753]: I0129 12:06:46.274945 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-13 02:15:19.885780923 +0000 UTC
Jan 29 12:06:47 crc kubenswrapper[4753]: I0129 12:06:47.275097 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-12 23:07:23.420373888 +0000 UTC
Jan 29 12:06:47 crc kubenswrapper[4753]: W0129 12:06:47.488689 4753 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": net/http: TLS handshake timeout
Jan 29 12:06:47 crc kubenswrapper[4753]: I0129 12:06:47.488907 4753 trace.go:236] Trace[236825001]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (29-Jan-2026 12:06:37.486) (total time: 10001ms):
Jan 29 12:06:47 crc kubenswrapper[4753]: Trace[236825001]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (12:06:47.488)
Jan 29 12:06:47 crc kubenswrapper[4753]: Trace[236825001]: [10.00197778s] [10.00197778s] END
Jan 29 12:06:47 crc kubenswrapper[4753]: E0129 12:06:47.488943 4753 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError"
Jan 29 12:06:47 crc kubenswrapper[4753]: I0129 12:06:47.731342 4753 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout
Jan 29 12:06:47 crc kubenswrapper[4753]: E0129 12:06:47.938743 4753 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Jan 29 12:06:48 crc kubenswrapper[4753]: I0129 12:06:48.276316 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-08 02:59:32.634875533 +0000 UTC
Jan 29 12:06:48 crc kubenswrapper[4753]: W0129 12:06:48.512472 4753 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": net/http: TLS handshake timeout
Jan 29 12:06:48 crc kubenswrapper[4753]: I0129 12:06:48.512636 4753 trace.go:236] Trace[1841481765]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (29-Jan-2026 12:06:38.510) (total time: 10002ms):
Jan 29 12:06:48 crc kubenswrapper[4753]: Trace[1841481765]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (12:06:48.512)
Jan 29 12:06:48 crc kubenswrapper[4753]: Trace[1841481765]: [10.002049622s] [10.002049622s] END
Jan 29 12:06:48 crc kubenswrapper[4753]: E0129 12:06:48.512671 4753 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError"
Jan 29 12:06:48 crc kubenswrapper[4753]: I0129 12:06:48.775308 4753 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 29 12:06:48 crc kubenswrapper[4753]: I0129 12:06:48.775893 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
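Both ListAndWatch traces above stop at almost exactly 10 seconds: the list call is bounded by a client-side timeout, and the TLS handshake never completes inside it. A sketch reproducing that timeout behaviour with a plain HTTP client; the 10s value is chosen to match the ~10001ms traces, and the URL comes from the log:

package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	// 10s chosen to match the ~10001ms traces above.
	client := &http.Client{Timeout: 10 * time.Second}
	url := "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500"
	start := time.Now()
	resp, err := client.Get(url)
	if err != nil {
		// e.g. "net/http: TLS handshake timeout" after ~10s, as in the trace
		fmt.Printf("list failed after %v: %v\n", time.Since(start), err)
		return
	}
	resp.Body.Close()
	fmt.Printf("list returned %d after %v\n", resp.StatusCode, time.Since(start))
}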
Jan 29 12:06:49 crc kubenswrapper[4753]: I0129 12:06:49.183288 4753 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403}
Jan 29 12:06:49 crc kubenswrapper[4753]: I0129 12:06:49.183385 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403"
Jan 29 12:06:49 crc kubenswrapper[4753]: I0129 12:06:49.188806 4753 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403}
Jan 29 12:06:49 crc kubenswrapper[4753]: I0129 12:06:49.188899 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403"
Jan 29 12:06:49 crc kubenswrapper[4753]: I0129 12:06:49.288854 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-10 08:17:59.693759686 +0000 UTC
Jan 29 12:06:50 crc kubenswrapper[4753]: I0129 12:06:50.290043 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-16 17:45:23.672828134 +0000 UTC
Jan 29 12:06:50 crc kubenswrapper[4753]: I0129 12:06:50.743595 4753 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok
Jan 29 12:06:50 crc kubenswrapper[4753]: [+]log ok
Jan 29 12:06:50 crc kubenswrapper[4753]: [+]etcd ok
Jan 29 12:06:50 crc kubenswrapper[4753]: [+]poststarthook/openshift.io-api-request-count-filter ok
Jan 29 12:06:50 crc kubenswrapper[4753]: [+]poststarthook/openshift.io-startkubeinformers ok
Jan 29 12:06:50 crc kubenswrapper[4753]: [+]poststarthook/openshift.io-openshift-apiserver-reachable ok
Jan 29 12:06:50 crc kubenswrapper[4753]: [+]poststarthook/openshift.io-oauth-apiserver-reachable ok
Jan 29 12:06:50 crc kubenswrapper[4753]: [+]poststarthook/start-apiserver-admission-initializer ok
Jan 29 12:06:50 crc kubenswrapper[4753]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok
Jan 29 12:06:50 crc kubenswrapper[4753]: [+]poststarthook/generic-apiserver-start-informers ok
Jan 29 12:06:50 crc kubenswrapper[4753]: [+]poststarthook/priority-and-fairness-config-consumer ok
Jan 29 12:06:50 crc kubenswrapper[4753]: [+]poststarthook/priority-and-fairness-filter ok
Jan 29 12:06:50 crc kubenswrapper[4753]: [+]poststarthook/storage-object-count-tracker-hook ok
Jan 29 12:06:50 crc kubenswrapper[4753]: [+]poststarthook/start-apiextensions-informers ok
Jan 29 12:06:50 crc kubenswrapper[4753]: [-]poststarthook/start-apiextensions-controllers failed: reason withheld
Jan 29 12:06:50 crc kubenswrapper[4753]: [+]poststarthook/crd-informer-synced ok
Jan 29 12:06:50 crc kubenswrapper[4753]: [+]poststarthook/start-system-namespaces-controller ok
Jan 29 12:06:50 crc kubenswrapper[4753]: [+]poststarthook/start-cluster-authentication-info-controller ok
Jan 29 12:06:50 crc kubenswrapper[4753]: [+]poststarthook/start-kube-apiserver-identity-lease-controller ok
Jan 29 12:06:50 crc kubenswrapper[4753]: [+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
Jan 29 12:06:50 crc kubenswrapper[4753]: [+]poststarthook/start-legacy-token-tracking-controller ok
Jan 29 12:06:50 crc kubenswrapper[4753]: [+]poststarthook/start-service-ip-repair-controllers ok
Jan 29 12:06:50 crc kubenswrapper[4753]: [-]poststarthook/rbac/bootstrap-roles failed: reason withheld
Jan 29 12:06:50 crc kubenswrapper[4753]: [+]poststarthook/scheduling/bootstrap-system-priority-classes ok
Jan 29 12:06:50 crc kubenswrapper[4753]: [+]poststarthook/priority-and-fairness-config-producer ok
Jan 29 12:06:50 crc kubenswrapper[4753]: [+]poststarthook/bootstrap-controller ok
Jan 29 12:06:50 crc kubenswrapper[4753]: [+]poststarthook/aggregator-reload-proxy-client-cert ok
Jan 29 12:06:50 crc kubenswrapper[4753]: [+]poststarthook/start-kube-aggregator-informers ok
Jan 29 12:06:50 crc kubenswrapper[4753]: [+]poststarthook/apiservice-status-local-available-controller ok
Jan 29 12:06:50 crc kubenswrapper[4753]: [+]poststarthook/apiservice-status-remote-available-controller ok
Jan 29 12:06:50 crc kubenswrapper[4753]: [+]poststarthook/apiservice-registration-controller ok
Jan 29 12:06:50 crc kubenswrapper[4753]: [+]poststarthook/apiservice-wait-for-first-sync ok
Jan 29 12:06:50 crc kubenswrapper[4753]: [+]poststarthook/apiservice-discovery-controller ok
Jan 29 12:06:50 crc kubenswrapper[4753]: [+]poststarthook/kube-apiserver-autoregistration ok
Jan 29 12:06:50 crc kubenswrapper[4753]: [+]autoregister-completion ok
Jan 29 12:06:50 crc kubenswrapper[4753]: [+]poststarthook/apiservice-openapi-controller ok
Jan 29 12:06:50 crc kubenswrapper[4753]: [+]poststarthook/apiservice-openapiv3-controller ok
Jan 29 12:06:50 crc kubenswrapper[4753]: livez check failed
Jan 29 12:06:50 crc kubenswrapper[4753]: I0129 12:06:50.743667 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 29 12:06:51 crc kubenswrapper[4753]: I0129 12:06:51.290583 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-05 12:40:33.506790855 +0000 UTC
Jan 29 12:06:52 crc kubenswrapper[4753]: I0129 12:06:52.073600 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc"
Jan 29 12:06:52 crc kubenswrapper[4753]: I0129 12:06:52.073885 4753 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 29 12:06:52 crc kubenswrapper[4753]: I0129 12:06:52.075379 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:06:52 crc kubenswrapper[4753]: I0129 12:06:52.075428 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:06:52 crc kubenswrapper[4753]: I0129 12:06:52.075460 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:06:52 crc kubenswrapper[4753]: I0129 12:06:52.090698 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc"
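The kube-apiserver startup probe above fails three different ways in turn: connection refused, then HTTP 403 while requests are still treated as anonymous, then HTTP 500 while poststarthooks are pending. A sketch of an HTTP check applying the same pass rule the prober uses (2xx and 3xx pass, everything else fails); the client settings are illustrative, and certificate verification is skipped only because this is a throwaway diagnostic:

package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"time"
)

func main() {
	client := &http.Client{
		Timeout: 5 * time.Second,
		// The CRC serving cert is not in the host trust store; skipping
		// verification is acceptable only for a throwaway diagnostic like this.
		Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
	}
	resp, err := client.Get("https://192.168.126.11:6443/livez")
	if err != nil {
		fmt.Println("probe failed:", err) // the "connection refused" case above
		return
	}
	defer resp.Body.Close()
	// Same rule the prober applies: 2xx-3xx passes, anything else fails,
	// so both the 403 and the 500 above count as failures.
	if resp.StatusCode >= 200 && resp.StatusCode < 400 {
		fmt.Println("probe succeeded:", resp.StatusCode)
		return
	}
	fmt.Printf("HTTP probe failed with statuscode: %d\n", resp.StatusCode)
}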
Jan 29 12:06:52 crc kubenswrapper[4753]: I0129 12:06:52.291270 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-30 02:14:58.780595885 +0000 UTC
Jan 29 12:06:52 crc kubenswrapper[4753]: I0129 12:06:52.502167 4753 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 29 12:06:52 crc kubenswrapper[4753]: I0129 12:06:52.503654 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:06:52 crc kubenswrapper[4753]: I0129 12:06:52.503758 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:06:52 crc kubenswrapper[4753]: I0129 12:06:52.503768 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:06:53 crc kubenswrapper[4753]: I0129 12:06:53.292359 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-15 18:32:15.267717298 +0000 UTC
Jan 29 12:06:54 crc kubenswrapper[4753]: E0129 12:06:54.170526 4753 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="7s"
Jan 29 12:06:54 crc kubenswrapper[4753]: I0129 12:06:54.173996 4753 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160
Jan 29 12:06:54 crc kubenswrapper[4753]: I0129 12:06:54.174045 4753 trace.go:236] Trace[137532577]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (29-Jan-2026 12:06:42.538) (total time: 11635ms):
Jan 29 12:06:54 crc kubenswrapper[4753]: Trace[137532577]: ---"Objects listed" error: 11635ms (12:06:54.173)
Jan 29 12:06:54 crc kubenswrapper[4753]: Trace[137532577]: [11.635168738s] [11.635168738s] END
Jan 29 12:06:54 crc kubenswrapper[4753]: I0129 12:06:54.174194 4753 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160
Jan 29 12:06:54 crc kubenswrapper[4753]: E0129 12:06:54.175879 4753 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc"
Jan 29 12:06:54 crc kubenswrapper[4753]: I0129 12:06:54.176356 4753 reconstruct.go:205] "DevicePaths of reconstructed volumes updated"
Jan 29 12:06:54 crc kubenswrapper[4753]: I0129 12:06:54.181070 4753 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146
Jan 29 12:06:54 crc kubenswrapper[4753]: I0129 12:06:54.218159 4753 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Liveness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:39864->192.168.126.11:17697: read: connection reset by peer" start-of-body=
Jan 29 12:06:54 crc kubenswrapper[4753]: I0129 12:06:54.218279 4753 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:39880->192.168.126.11:17697: read: connection reset by peer" start-of-body=
Jan 29 12:06:54 crc kubenswrapper[4753]: I0129 12:06:54.218302 4753 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:39864->192.168.126.11:17697: read: connection reset by peer"
Jan 29 12:06:54 crc kubenswrapper[4753]: I0129 12:06:54.218386 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:39880->192.168.126.11:17697: read: connection reset by peer"
Jan 29 12:06:54 crc kubenswrapper[4753]: I0129 12:06:54.292519 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-02 14:38:01.865606974 +0000 UTC
Jan 29 12:06:54 crc kubenswrapper[4753]: I0129 12:06:54.509730 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log"
Jan 29 12:06:54 crc kubenswrapper[4753]: I0129 12:06:54.510272 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log"
Jan 29 12:06:54 crc kubenswrapper[4753]: I0129 12:06:54.512158 4753 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a" exitCode=255
Jan 29 12:06:54 crc kubenswrapper[4753]: I0129 12:06:54.512219 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a"}
Jan 29 12:06:54 crc kubenswrapper[4753]: I0129 12:06:54.512375 4753 scope.go:117] "RemoveContainer" containerID="49621dd18e83307eaf20b70845e2c308399fbdf84166ab05eec32d1a08f5ae6e"
Jan 29 12:06:54 crc kubenswrapper[4753]: I0129 12:06:54.541951 4753 scope.go:117] "RemoveContainer" containerID="34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a"
Jan 29 12:06:54 crc kubenswrapper[4753]: E0129 12:06:54.542336 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792"
Jan 29 12:06:54 crc kubenswrapper[4753]: I0129 12:06:54.762333 4753 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160
Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.008627 4753 apiserver.go:52] "Watching apiserver"
Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.012938 4753 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66
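"back-off 10s restarting failed container" above is the first step of the kubelet's crash-loop backoff for this container. A sketch of the doubling-to-a-ceiling shape of that backoff; the 10s start matches this log, while the doubling and the 5m ceiling are assumptions from commonly documented kubelet behaviour, not values visible here:

package main

import (
	"fmt"
	"time"
)

func main() {
	// 10s start matches "back-off 10s" in the log; the doubling and the 5m
	// ceiling are assumptions, not read from this log.
	backoff := 10 * time.Second
	ceiling := 5 * time.Minute
	for restart := 1; restart <= 6; restart++ {
		fmt.Printf("restart %d: wait %v before starting the container again\n", restart, backoff)
		backoff *= 2
		if backoff > ceiling {
			backoff = ceiling
		}
	}
}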
Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.013761 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-kube-apiserver/kube-apiserver-crc"]
Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.014733 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.016427 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 29 12:06:55 crc kubenswrapper[4753]: E0129 12:06:55.016493 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.016571 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 29 12:06:55 crc kubenswrapper[4753]: E0129 12:06:55.016601 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.016649 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb"
Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.016925 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h"
Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.017207 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 29 12:06:55 crc kubenswrapper[4753]: E0129 12:06:55.017255 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.024535 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt"
Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.024948 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script"
Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.025134 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides"
Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.025326 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert"
Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.025471 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls"
Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.025633 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt"
Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.027739 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm"
Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.028346 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt"
Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.030490 4753 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world"
Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.036838 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt"
Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.057752 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.075587 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.092117 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"703a6b9d-a15d-4ed3-b00b-db7bd5d42c61\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver 
kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cab3ab80a8fddbc591cd07fdbed800f525bb8a1b5505bf818ebb59cbcce73003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7af8be0836a8ff7d08ff48d9487acdedd4b83127538d071d13b3215400c0f8de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d57a9cede640bdcec325175598b72dd00398436b730248728c7d9afd545b1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://49621dd18e83307eaf20b70845e2c308399fbdf84166ab05eec32d1a08f5ae6e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T12:06:36Z\\\",\\\"message\\\":\\\"W0129 12:06:35.081606 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0129 
12:06:35.083349 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769688395 cert, and key in /tmp/serving-cert-2092656240/serving-signer.crt, /tmp/serving-cert-2092656240/serving-signer.key\\\\nI0129 12:06:35.840506 1 observer_polling.go:159] Starting file observer\\\\nW0129 12:06:35.845474 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0129 12:06:35.845752 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 12:06:35.849298 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2092656240/tls.crt::/tmp/serving-cert-2092656240/tls.key\\\\\\\"\\\\nF0129 12:06:36.376330 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T12:06:54Z\\\",\\\"message\\\":\\\"ed a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\"\\\\nI0129 12:06:54.190978 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 12:06:54.198044 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 12:06:54.198078 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 12:06:54.198139 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 12:06:54.198147 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 12:06:54.208089 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 12:06:54.208122 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208128 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208132 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 12:06:54.208135 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 12:06:54.208139 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 12:06:54.208143 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 12:06:54.208164 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0129 12:06:54.212380 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\" 
certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769688397\\\\\\\\\\\\\\\" (2026-01-29 12:06:37 +0000 UTC to 2026-02-28 12:06:38 +0000 UTC (now=2026-01-29 12:06:54.212317535 +0000 UTC))\\\\\\\"\\\\nF0129 12:06:54.212498 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e02d977e34888952623f4ced5e073d7bf1ff360f8deb435dc577d2ecf146cb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:33Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.111977 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.112035 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.112067 4753 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.112105 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.112128 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.112154 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.112182 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.112204 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.112247 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.112275 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.112295 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.112366 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 
12:06:55.112394 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.112415 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.113044 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.113085 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.113115 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.113147 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.113168 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.113196 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.113239 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.113271 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 29 12:06:55 crc 
kubenswrapper[4753]: I0129 12:06:55.113296 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.113319 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.113348 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.113370 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.113392 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.113431 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.113453 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.113472 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.113492 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.113513 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: 
\"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.113535 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.113567 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.113591 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.113633 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.113656 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.113679 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.113701 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.113736 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.113757 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.113779 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod 
\"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.113801 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.113840 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.113879 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.113902 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.113924 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.113952 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.113977 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.113998 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.114038 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.114074 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: 
\"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.114110 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.114129 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.114182 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.114204 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.114238 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.114261 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.114284 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.114314 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.114335 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.114356 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.114377 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.114397 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.114418 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.114441 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.114468 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.114491 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.114515 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.114545 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.114565 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.114586 4753 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.114603 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.114618 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.114637 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.114659 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.114679 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.114693 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.114710 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.114723 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.114738 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 
12:06:55.114754 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.114771 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.114788 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.114803 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.114820 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.114836 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.114852 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.114868 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.114883 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.114899 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: 
\"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.114915 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.114931 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.114945 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.114960 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.114976 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.114992 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.115007 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.115027 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.115059 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.115074 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" 
(UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.115089 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.115105 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.115121 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.115136 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.115159 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.115177 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.115193 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.115210 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.115240 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.115258 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: 
\"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.115273 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.115289 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.115305 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.115320 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.115337 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.115355 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.115370 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.115389 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.115403 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.115419 4753 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.115435 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.115450 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.115466 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.115483 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.115499 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.115515 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.115531 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.115548 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.115564 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 29 12:06:55 crc 
kubenswrapper[4753]: I0129 12:06:55.115579 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.115595 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.115645 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.115664 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.115680 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.115698 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.115717 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.115736 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.115760 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.115783 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " 
Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.115801 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.115817 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.115833 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.115854 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.115871 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.115886 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.115902 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.115920 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.115936 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.115951 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" 
(UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.115968 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.115983 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.116001 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.116017 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.116031 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.116047 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.116062 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.116078 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.116094 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.116112 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: 
\"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.116129 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.116145 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.116161 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.116178 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.116194 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.116210 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.116239 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.116256 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.116272 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.116287 4753 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.116304 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.116321 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.116338 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.116354 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.116372 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.116389 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.116636 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.116658 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.116674 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.116694 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.116711 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.116736 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.116754 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.116770 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.116786 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.116802 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.116819 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.116838 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.116857 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.116872 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.116889 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.116906 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.116924 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.116947 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.116990 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.117082 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.117149 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.117172 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.117190 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") 
" pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.117209 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.117239 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.117257 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.117275 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.117292 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.117313 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.117330 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.117357 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.117376 4753 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.117392 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.118739 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.119492 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.119951 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.120281 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.120920 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.121397 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.121639 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.121902 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.122159 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.122504 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.122776 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.123165 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.123499 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.123991 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.124296 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.156320 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.124573 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.156595 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.157132 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.157397 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.157596 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.157778 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). 
InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.158080 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.158316 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.158548 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.163457 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.164604 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.165433 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.165800 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.166454 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.166977 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.167340 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.167585 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.167849 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.168338 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.168602 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.168849 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). 
InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.168988 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.169124 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.169343 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.169512 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.169519 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.169695 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.169809 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.169824 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.170021 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.170035 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.170042 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.170106 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.170278 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.170343 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.170359 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.170375 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). 
InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.170540 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.170599 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.170663 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.170704 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.170907 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.170979 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.171170 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.171201 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.171253 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.171347 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.171364 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.171549 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.171570 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.171635 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.171737 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.171874 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.172002 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.172050 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.172089 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.172097 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.173178 4753 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.173278 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.173310 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.173382 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.173550 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.173883 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.173961 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.173969 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.174287 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: E0129 12:06:55.174381 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:06:55.674343431 +0000 UTC m=+29.926424956 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.174624 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.174713 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.174744 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.174851 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.175352 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.175355 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.175545 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". 
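[editor's note] The entry above shows kubelet's nested pending operations table refusing an immediate retry: TearDownAt failed because the CSI driver kubevirt.io.hostpath-provisioner had not (re)registered yet, so the operation was re-queued with durationBeforeRetry 500ms. The gate is a per-volume exponential backoff: each failure roughly doubles the wait up to a cap. A hedged Go sketch of that policy (the 500ms start matches the log; the cap value and all names here are assumptions, not kubelet's exact code):

package main

import (
	"fmt"
	"time"
)

const (
	initialDurationBeforeRetry = 500 * time.Millisecond
	maxDurationBeforeRetry     = 2*time.Minute + 12*time.Second // assumed cap
)

// backoffGate mimics the gating visible in the nestedpendingoperations.go
// line: after a failure, no retries are permitted until
// lastError + durationBeforeRetry, and the duration doubles per failure.
type backoffGate struct {
	duration  time.Duration
	lastError time.Time
}

func (b *backoffGate) recordError(now time.Time) {
	if b.duration == 0 {
		b.duration = initialDurationBeforeRetry
	} else {
		b.duration *= 2
		if b.duration > maxDurationBeforeRetry {
			b.duration = maxDurationBeforeRetry
		}
	}
	b.lastError = now
}

func (b *backoffGate) retryAllowed(now time.Time) bool {
	return now.After(b.lastError.Add(b.duration))
}

func main() {
	g := &backoffGate{}
	now := time.Now()
	g.recordError(now) // e.g. TearDownAt failed: CSI driver not registered
	fmt.Println("retry allowed immediately?", g.retryAllowed(now))
	fmt.Println("retry allowed after 600ms?", g.retryAllowed(now.Add(600*time.Millisecond)))
}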
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.175675 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.175989 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.176121 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.176339 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.176744 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.176906 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.177052 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.177348 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.177581 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.177942 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.178190 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.178400 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.178754 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.179133 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.179585 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.180044 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.180353 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 29 12:06:55 crc kubenswrapper[4753]: E0129 12:06:55.180684 4753 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 29 12:06:55 crc kubenswrapper[4753]: E0129 12:06:55.180729 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 12:06:55.680716402 +0000 UTC m=+29.932797857 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.180798 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.180926 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.181183 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.181526 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). 
InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.181959 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.182330 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.182875 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.182944 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.183202 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.183264 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.183521 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.183528 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.183815 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.183816 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.183995 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 29 12:06:55 crc kubenswrapper[4753]: E0129 12:06:55.184095 4753 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 29 12:06:55 crc kubenswrapper[4753]: E0129 12:06:55.184133 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 12:06:55.684123125 +0000 UTC m=+29.936204580 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.184141 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.184515 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.184520 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.184550 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.184740 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.185061 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.453719 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.454416 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.459287 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.459783 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.460264 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.460633 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.461060 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.461052 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.461478 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.461531 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.461856 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.461925 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.462263 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.462474 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.462583 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-01 08:29:52.485241329 +0000 UTC Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.462645 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.463284 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.463519 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.463647 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.464167 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.464527 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.464533 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.465010 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.465209 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.465357 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.465608 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.466182 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.466251 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.466511 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.466641 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.466772 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.467040 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.467232 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). 
InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.467670 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.467790 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.470753 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.467922 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.468141 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.468435 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.468441 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.468467 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.468789 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.468843 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.468901 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.468920 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.469061 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.469254 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.469305 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.469491 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.469542 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.470065 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.470088 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.470362 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.470400 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.463647 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.470987 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471154 4753 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471173 4753 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471183 4753 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471193 4753 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471202 4753 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471210 4753 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471218 4753 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471239 4753 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471250 4753 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471258 4753 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 
12:06:55.471266 4753 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471274 4753 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471292 4753 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471310 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471323 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471341 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471360 4753 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471376 4753 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471390 4753 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471407 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471416 4753 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471417 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471429 4753 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471474 4753 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471489 4753 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471503 4753 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471514 4753 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471526 4753 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471537 4753 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471548 4753 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471559 4753 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471571 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471583 4753 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471595 4753 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471614 4753 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471629 
4753 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471639 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471642 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471695 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471706 4753 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471716 4753 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471740 4753 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471753 4753 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471762 4753 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471770 4753 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471778 4753 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471786 4753 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471794 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on 
node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471805 4753 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471813 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471824 4753 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471838 4753 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471846 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471855 4753 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471863 4753 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471873 4753 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471881 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471888 4753 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471896 4753 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471904 4753 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471917 4753 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath 
\"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471929 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471942 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471950 4753 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471958 4753 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471975 4753 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471983 4753 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.471991 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.472001 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.472009 4753 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.472017 4753 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.472025 4753 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.472033 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.472042 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") 
on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.472041 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.472050 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.472088 4753 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.472099 4753 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.472121 4753 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.472140 4753 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.472159 4753 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.472186 4753 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.472201 4753 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.472218 4753 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.472251 4753 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.472260 4753 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.472377 4753 operation_generator.go:803] UnmountVolume.TearDown 
succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.472395 4753 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.472422 4753 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.472440 4753 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.472614 4753 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.472628 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.472639 4753 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.472648 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.472659 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.472677 4753 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.472699 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.472718 4753 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.472733 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: 
\"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.472781 4753 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.472824 4753 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.472840 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.472864 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.472878 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.472887 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.472897 4753 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.472911 4753 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.472921 4753 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.472930 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.472939 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.472961 4753 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.472985 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: 
\"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473000 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473011 4753 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473021 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473029 4753 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473038 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473046 4753 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473054 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473063 4753 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473071 4753 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473079 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473162 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473180 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473194 4753 reconciler_common.go:293] "Volume detached for volume 
\"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473211 4753 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473243 4753 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473261 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473270 4753 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473278 4753 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473286 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473294 4753 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473303 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473311 4753 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473321 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473329 4753 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473337 4753 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473351 4753 reconciler_common.go:293] "Volume detached for volume 
\"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473364 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473380 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473400 4753 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473418 4753 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473436 4753 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473450 4753 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473467 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473476 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473484 4753 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473492 4753 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473501 4753 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473509 4753 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473517 4753 reconciler_common.go:293] 
"Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473525 4753 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473533 4753 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473541 4753 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473582 4753 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473591 4753 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473598 4753 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473606 4753 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473615 4753 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473630 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473639 4753 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473684 4753 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473733 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473758 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" 
(UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473836 4753 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473856 4753 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473897 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473915 4753 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473933 4753 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473955 4753 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.473969 4753 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.474000 4753 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.474029 4753 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.474039 4753 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.474048 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.474056 4753 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.474064 4753 reconciler_common.go:293] 
"Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.474072 4753 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.474085 4753 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.464762 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.463683 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.474428 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.476291 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.476826 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.477551 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.478044 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.478492 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.478713 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.480200 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.480317 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.500365 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.500429 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.500834 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.509236 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.509713 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.518124 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.521362 4753 scope.go:117] "RemoveContainer" containerID="34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a" Jan 29 12:06:55 crc kubenswrapper[4753]: E0129 12:06:55.521792 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.522502 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.547055 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.574897 4753 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.575331 4753 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.575441 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.575583 4753 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.575687 4753 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.575803 4753 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.575999 4753 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.576110 4753 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.576267 4753 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.576378 4753 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.576492 4753 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.576633 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.576809 4753 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: 
\"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.576943 4753 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.577068 4753 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.577190 4753 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.577350 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.577468 4753 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.577605 4753 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.678497 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:06:55 crc kubenswrapper[4753]: E0129 12:06:55.679256 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:06:56.679216157 +0000 UTC m=+30.931297612 (durationBeforeRetry 1s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.768615 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.769618 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.779617 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.779952 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 12:06:55 crc kubenswrapper[4753]: E0129 12:06:55.779797 4753 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 29 12:06:55 crc kubenswrapper[4753]: E0129 12:06:55.780084 4753 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 29 12:06:55 crc kubenswrapper[4753]: E0129 12:06:55.780372 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 12:06:56.780246882 +0000 UTC m=+31.032328337 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 29 12:06:55 crc kubenswrapper[4753]: E0129 12:06:55.780493 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 12:06:56.780478949 +0000 UTC m=+31.032560414 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 29 12:06:55 crc kubenswrapper[4753]: I0129 12:06:55.783265 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 12:06:56 crc kubenswrapper[4753]: E0129 12:06:56.008000 4753 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 29 12:06:56 crc kubenswrapper[4753]: E0129 12:06:56.008042 4753 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 29 12:06:56 crc kubenswrapper[4753]: E0129 12:06:56.008065 4753 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 12:06:56 crc kubenswrapper[4753]: E0129 12:06:56.008111 4753 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 29 12:06:56 crc kubenswrapper[4753]: E0129 12:06:56.008138 4753 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 29 12:06:56 crc kubenswrapper[4753]: E0129 12:06:56.008147 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-29 12:06:56.508125998 +0000 UTC m=+30.760207453 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 12:06:56 crc kubenswrapper[4753]: E0129 12:06:56.008150 4753 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 12:06:56 crc kubenswrapper[4753]: E0129 12:06:56.008184 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-29 12:06:56.508177789 +0000 UTC m=+30.760259244 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.009414 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.012185 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.020871 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.023495 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.024411 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.026502 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.027249 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.030279 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.031025 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.031984 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.041617 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.042074 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.042018 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.042554 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.043977 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.045577 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.048215 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.050883 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.052658 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.053624 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.054993 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.055784 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.056257 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Jan 29 12:06:56 crc 
kubenswrapper[4753]: I0129 12:06:56.057501 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.058706 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.059118 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.060173 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.064281 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.064773 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.066574 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.067350 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.068066 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.070618 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.071360 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.072767 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.072787 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"703a6b9d-a15d-4ed3-b00b-db7bd5d42c61\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver 
kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cab3ab80a8fddbc591cd07fdbed800f525bb8a1b5505bf818ebb59cbcce73003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7af8be0836a8ff7d08ff48d9487acdedd4b83127538d071d13b3215400c0f8de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d57a9cede640bdcec325175598b72dd00398436b730248728c7d9afd545b1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T12:06:54Z\\\",\\\"message\\\":\\\"ed a new cert/key pair\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\"\\\\nI0129 12:06:54.190978 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 12:06:54.198044 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 12:06:54.198078 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 12:06:54.198139 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 12:06:54.198147 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 12:06:54.208089 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 12:06:54.208122 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208128 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208132 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 12:06:54.208135 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 12:06:54.208139 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 12:06:54.208143 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 12:06:54.208164 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0129 12:06:54.212380 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769688397\\\\\\\\\\\\\\\" (2026-01-29 12:06:37 +0000 UTC to 2026-02-28 12:06:38 +0000 UTC (now=2026-01-29 12:06:54.212317535 +0000 UTC))\\\\\\\"\\\\nF0129 12:06:54.212498 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e02d977e34888952623f4ced5e073d7bf1ff360f8deb435dc577d2ecf146cb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:33Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.073330 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.075442 4753 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.075548 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.078697 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.079737 
4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.080285 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.082502 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.085399 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.086250 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.089448 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.091640 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.092937 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.094180 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.095114 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.097842 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.098709 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.099887 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.100651 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.101792 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.102971 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Jan 29 12:06:56 crc 
kubenswrapper[4753]: I0129 12:06:56.104083 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.104548 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.105321 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.105920 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.107288 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.108044 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.109692 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.110259 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.110350 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.116707 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.128001 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.140048 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.153638 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.167086 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"703a6b9d-a15d-4ed3-b00b-db7bd5d42c61\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver 
kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cab3ab80a8fddbc591cd07fdbed800f525bb8a1b5505bf818ebb59cbcce73003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7af8be0836a8ff7d08ff48d9487acdedd4b83127538d071d13b3215400c0f8de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d57a9cede640bdcec325175598b72dd00398436b730248728c7d9afd545b1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T12:06:54Z\\\",\\\"message\\\":\\\"ed a new cert/key pair\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\"\\\\nI0129 12:06:54.190978 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 12:06:54.198044 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 12:06:54.198078 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 12:06:54.198139 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 12:06:54.198147 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 12:06:54.208089 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 12:06:54.208122 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208128 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208132 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 12:06:54.208135 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 12:06:54.208139 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 12:06:54.208143 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 12:06:54.208164 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0129 12:06:54.212380 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769688397\\\\\\\\\\\\\\\" (2026-01-29 12:06:37 +0000 UTC to 2026-02-28 12:06:38 +0000 UTC (now=2026-01-29 12:06:54.212317535 +0000 UTC))\\\\\\\"\\\\nF0129 12:06:54.212498 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e02d977e34888952623f4ced5e073d7bf1ff360f8deb435dc577d2ecf146cb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:33Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.476033 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.476090 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.476212 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.475816 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-13 17:34:42.849393193 +0000 UTC Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.491004 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.513665 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready 
status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.530672 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"11f318e55d920ee34d78c41f1c768ad1af686f2144e45e7ef632745bba388476"} Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.537888 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"1007becdc22d9638f168b7b014308629b9a1aab959bd05716284ea03fb8f5e48"} Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.539975 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"1eb0560db0648ad25ba04c6bb9d78013277fe6ceb4a38338f548716bca221bf3"} Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.540925 4753 scope.go:117] "RemoveContainer" containerID="34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a" Jan 29 12:06:56 crc kubenswrapper[4753]: E0129 12:06:56.541126 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.544563 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.704426 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.704594 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.704640 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 12:06:56 crc kubenswrapper[4753]: E0129 12:06:56.704973 4753 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object 
"openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 29 12:06:56 crc kubenswrapper[4753]: E0129 12:06:56.705017 4753 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 29 12:06:56 crc kubenswrapper[4753]: E0129 12:06:56.705045 4753 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 12:06:56 crc kubenswrapper[4753]: E0129 12:06:56.705179 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-29 12:06:57.705152266 +0000 UTC m=+31.957233721 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 12:06:56 crc kubenswrapper[4753]: E0129 12:06:56.706086 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:06:58.706073393 +0000 UTC m=+32.958154858 (durationBeforeRetry 2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:06:56 crc kubenswrapper[4753]: E0129 12:06:56.706190 4753 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 29 12:06:56 crc kubenswrapper[4753]: E0129 12:06:56.706208 4753 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 29 12:06:56 crc kubenswrapper[4753]: E0129 12:06:56.706241 4753 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 12:06:56 crc kubenswrapper[4753]: E0129 12:06:56.706292 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. 
No retries permitted until 2026-01-29 12:06:57.7062813 +0000 UTC m=+31.958362765 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.714315 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.723578 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.805617 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.805734 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 12:06:56 crc kubenswrapper[4753]: E0129 12:06:56.805796 4753 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 29 12:06:56 crc kubenswrapper[4753]: E0129 12:06:56.805893 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 12:06:58.805869621 +0000 UTC m=+33.057951076 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 29 12:06:56 crc kubenswrapper[4753]: E0129 12:06:56.806720 4753 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 29 12:06:56 crc kubenswrapper[4753]: E0129 12:06:56.806861 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 12:06:58.80683965 +0000 UTC m=+33.058921105 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.888113 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.888142 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.888182 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 12:06:56 crc kubenswrapper[4753]: E0129 12:06:56.888414 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 12:06:56 crc kubenswrapper[4753]: E0129 12:06:56.888751 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 12:06:56 crc kubenswrapper[4753]: E0129 12:06:56.888903 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.929611 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.948631 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b108c546-a8c1-4bf9-814b-bfb5871b3013\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9842d9d6fed8c87bc20835964d3271b0be3117fedc658e6ef028db422d1ad478\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4fd4169036d3a646c9e5e4bfa8d86a92c557c3d8f77608ae5308dd0b8c45517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b4a6bbb431e321e54013c96e3cca1537283cbdf65d89a978780550d603663fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://31bd7db7bddd3e16cb455f7af9fd3a9bca49919c8f1ffb676dec83c543ba7763\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.972803 4753 csr.go:261] certificate signing request csr-tzbz7 is approved, waiting to be issued Jan 29 12:06:56 crc kubenswrapper[4753]: I0129 12:06:56.974648 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:57 crc kubenswrapper[4753]: I0129 12:06:57.102977 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"703a6b9d-a15d-4ed3-b00b-db7bd5d42c61\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cab3ab80a8fddbc591cd07fdbed800f525bb8a1b5505bf818ebb59cbcce73003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7af8be0836a8ff7d08ff48d9487acdedd4b83127538d071d13b3215400c0f8de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d57a9cede640bdcec325175598b72dd00398436b730248728c7d9afd545b1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T12:06:54Z\\\",\\\"message\\\":\\\"ed a new cert/key pair\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\"\\\\nI0129 12:06:54.190978 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 12:06:54.198044 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 12:06:54.198078 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 12:06:54.198139 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 12:06:54.198147 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 12:06:54.208089 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 12:06:54.208122 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208128 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208132 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 12:06:54.208135 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 12:06:54.208139 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 12:06:54.208143 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 12:06:54.208164 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0129 12:06:54.212380 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769688397\\\\\\\\\\\\\\\" (2026-01-29 12:06:37 +0000 UTC to 2026-02-28 12:06:38 +0000 UTC (now=2026-01-29 12:06:54.212317535 +0000 UTC))\\\\\\\"\\\\nF0129 12:06:54.212498 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e02d977e34888952623f4ced5e073d7bf1ff360f8deb435dc577d2ecf146cb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:33Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:57 crc kubenswrapper[4753]: I0129 12:06:57.129766 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:57 crc kubenswrapper[4753]: I0129 12:06:57.277549 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:57 crc kubenswrapper[4753]: I0129 12:06:57.285654 4753 csr.go:257] certificate signing request csr-tzbz7 is issued Jan 29 12:06:57 crc kubenswrapper[4753]: I0129 12:06:57.521164 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-26 04:57:03.267421748 +0000 UTC Jan 29 12:06:57 crc kubenswrapper[4753]: I0129 12:06:57.577957 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b108c546-a8c1-4bf9-814b-bfb5871b3013\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9842d9d6fed8c87bc20835964d3271b0be3117fedc658e6ef028db422d1ad478\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4fd4169036d3a646c9e5e4bfa8d86a92c557c3d8f77608ae5308dd0b8c45517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b4a6bbb431e321e54013c96e3cca1537283cbdf65d89a978780550d603663fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://31bd7db7bddd3e16cb455f7af9fd3a9bca49919c8f1ffb676dec83c543ba7763\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:57 crc kubenswrapper[4753]: I0129 12:06:57.589132 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"5d7ccf10988fd83d31d42be6f82403067a0cc50313c4d5dcddfb528507efffa4"} Jan 29 12:06:57 crc kubenswrapper[4753]: I0129 12:06:57.595241 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"ea7c92ec02a9cf6ec5fbd8efe22a066dfef9b48f042137964c5dd49578c99079"} Jan 29 12:06:57 crc kubenswrapper[4753]: I0129
12:06:57.596080 4753 scope.go:117] "RemoveContainer" containerID="34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a" Jan 29 12:06:57 crc kubenswrapper[4753]: E0129 12:06:57.596391 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Jan 29 12:06:57 crc kubenswrapper[4753]: I0129 12:06:57.601989 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:57 crc kubenswrapper[4753]: I0129 12:06:57.609077 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-x8kzr"] Jan 29 12:06:57 crc kubenswrapper[4753]: I0129 12:06:57.609711 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-x8kzr" Jan 29 12:06:57 crc kubenswrapper[4753]: I0129 12:06:57.615453 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Jan 29 12:06:57 crc kubenswrapper[4753]: I0129 12:06:57.618135 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Jan 29 12:06:57 crc kubenswrapper[4753]: I0129 12:06:57.618437 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Jan 29 12:06:57 crc kubenswrapper[4753]: I0129 12:06:57.620639 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Jan 29 12:06:57 crc kubenswrapper[4753]: I0129 12:06:57.622548 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d753320a-61c2-4c0e-bd48-96d74b352114-host\") pod \"node-ca-x8kzr\" (UID: \"d753320a-61c2-4c0e-bd48-96d74b352114\") " pod="openshift-image-registry/node-ca-x8kzr" Jan 29 12:06:57 crc kubenswrapper[4753]: I0129 12:06:57.622647 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/d753320a-61c2-4c0e-bd48-96d74b352114-serviceca\") pod \"node-ca-x8kzr\" (UID: \"d753320a-61c2-4c0e-bd48-96d74b352114\") " pod="openshift-image-registry/node-ca-x8kzr" Jan 29 12:06:57 crc kubenswrapper[4753]: I0129 12:06:57.622699 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tgns4\" (UniqueName: \"kubernetes.io/projected/d753320a-61c2-4c0e-bd48-96d74b352114-kube-api-access-tgns4\") pod \"node-ca-x8kzr\" (UID: \"d753320a-61c2-4c0e-bd48-96d74b352114\") " pod="openshift-image-registry/node-ca-x8kzr" Jan 29 12:06:57 crc kubenswrapper[4753]: I0129 12:06:57.631625 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:57 crc kubenswrapper[4753]: I0129 12:06:57.922652 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 12:06:57 crc kubenswrapper[4753]: I0129 12:06:57.922865 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d753320a-61c2-4c0e-bd48-96d74b352114-host\") pod \"node-ca-x8kzr\" (UID: \"d753320a-61c2-4c0e-bd48-96d74b352114\") " pod="openshift-image-registry/node-ca-x8kzr" Jan 29 12:06:57 crc kubenswrapper[4753]: I0129 12:06:57.922952 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/d753320a-61c2-4c0e-bd48-96d74b352114-serviceca\") pod \"node-ca-x8kzr\" (UID: \"d753320a-61c2-4c0e-bd48-96d74b352114\") " pod="openshift-image-registry/node-ca-x8kzr" Jan 29 12:06:57 crc kubenswrapper[4753]: I0129 12:06:57.923023 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tgns4\" (UniqueName: \"kubernetes.io/projected/d753320a-61c2-4c0e-bd48-96d74b352114-kube-api-access-tgns4\") pod \"node-ca-x8kzr\" (UID: \"d753320a-61c2-4c0e-bd48-96d74b352114\") " pod="openshift-image-registry/node-ca-x8kzr" Jan 29 12:06:57 crc kubenswrapper[4753]: I0129 12:06:57.923066 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 12:06:57 crc kubenswrapper[4753]: I0129 12:06:57.923565 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d753320a-61c2-4c0e-bd48-96d74b352114-host\") pod \"node-ca-x8kzr\" (UID: \"d753320a-61c2-4c0e-bd48-96d74b352114\") " pod="openshift-image-registry/node-ca-x8kzr" Jan 29 12:06:57 crc kubenswrapper[4753]: E0129 12:06:57.924167 4753 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 29 12:06:57 crc kubenswrapper[4753]: E0129 12:06:57.924326 4753 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: 
object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 29 12:06:57 crc kubenswrapper[4753]: E0129 12:06:57.924413 4753 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 12:06:57 crc kubenswrapper[4753]: E0129 12:06:57.924814 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-29 12:06:59.924713318 +0000 UTC m=+34.176794923 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 12:06:57 crc kubenswrapper[4753]: E0129 12:06:57.924168 4753 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 29 12:06:57 crc kubenswrapper[4753]: E0129 12:06:57.924917 4753 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 29 12:06:57 crc kubenswrapper[4753]: E0129 12:06:57.924929 4753 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 12:06:57 crc kubenswrapper[4753]: E0129 12:06:57.924964 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-29 12:06:59.924955406 +0000 UTC m=+34.177037051 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 12:06:57 crc kubenswrapper[4753]: I0129 12:06:57.930760 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/d753320a-61c2-4c0e-bd48-96d74b352114-serviceca\") pod \"node-ca-x8kzr\" (UID: \"d753320a-61c2-4c0e-bd48-96d74b352114\") " pod="openshift-image-registry/node-ca-x8kzr" Jan 29 12:06:57 crc kubenswrapper[4753]: I0129 12:06:57.931635 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 12:06:57 crc kubenswrapper[4753]: E0129 12:06:57.932288 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 12:06:57 crc kubenswrapper[4753]: I0129 12:06:57.955450 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tgns4\" (UniqueName: \"kubernetes.io/projected/d753320a-61c2-4c0e-bd48-96d74b352114-kube-api-access-tgns4\") pod \"node-ca-x8kzr\" (UID: \"d753320a-61c2-4c0e-bd48-96d74b352114\") " pod="openshift-image-registry/node-ca-x8kzr" Jan 29 12:06:57 crc kubenswrapper[4753]: I0129 12:06:57.959809 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:57 crc kubenswrapper[4753]: I0129 12:06:57.974218 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:57 crc kubenswrapper[4753]: I0129 12:06:57.984220 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:58 crc kubenswrapper[4753]: I0129 12:06:58.001283 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:58 crc kubenswrapper[4753]: I0129 12:06:58.016361 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:58 crc kubenswrapper[4753]: I0129 12:06:58.335323 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-x8kzr" Jan 29 12:06:58 crc kubenswrapper[4753]: I0129 12:06:58.335322 4753 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2027-01-29 12:01:57 +0000 UTC, rotation deadline is 2026-10-27 14:09:35.248717826 +0000 UTC Jan 29 12:06:58 crc kubenswrapper[4753]: I0129 12:06:58.335684 4753 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 6506h2m36.913038753s for next certificate rotation Jan 29 12:06:58 crc kubenswrapper[4753]: I0129 12:06:58.338155 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:58 crc kubenswrapper[4753]: I0129 12:06:58.354374 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b108c546-a8c1-4bf9-814b-bfb5871b3013\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9842d9d6fed8c87bc20835964d3271b0be3117fedc658e6ef028db422d1ad478\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4fd4169036d3a646c9e5e4bfa8d86a92c557c3d8f77608ae5308dd0b8c45517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b4a6bbb431e321e54013c96e3cca1537283cbdf65d89a978780550d603663fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://31bd7db7bddd3e16cb455f7af9fd3a9bca49919c8f1ffb676dec83c543ba7763\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:58 crc kubenswrapper[4753]: I0129 12:06:58.373816 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:58 crc kubenswrapper[4753]: I0129 12:06:58.389936 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x8kzr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d753320a-61c2-4c0e-bd48-96d74b352114\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tgns4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x8kzr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:58 crc kubenswrapper[4753]: I0129 12:06:58.405875 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"703a6b9d-a15d-4ed3-b00b-db7bd5d42c61\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cab3ab80a8fddbc591cd07fdbed800f525bb8a1b5505bf818ebb59cbcce73003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7af8be0836a8ff7d08ff48d9487acdedd4b83127538d071d13b3215400c0f8de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d57a9cede640bdcec325175598b72dd00398436b730248728c7d9afd545b1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T12:06:54Z\\\",\\\"message\\\":\\\"ed a new cert/key pair\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\"\\\\nI0129 12:06:54.190978 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 12:06:54.198044 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 12:06:54.198078 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 12:06:54.198139 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 12:06:54.198147 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 12:06:54.208089 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 12:06:54.208122 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208128 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208132 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 12:06:54.208135 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 12:06:54.208139 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 12:06:54.208143 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 12:06:54.208164 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0129 12:06:54.212380 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769688397\\\\\\\\\\\\\\\" (2026-01-29 12:06:37 +0000 UTC to 2026-02-28 12:06:38 +0000 UTC (now=2026-01-29 12:06:54.212317535 +0000 UTC))\\\\\\\"\\\\nF0129 12:06:54.212498 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e02d977e34888952623f4ced5e073d7bf1ff360f8deb435dc577d2ecf146cb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:33Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:58 crc kubenswrapper[4753]: I0129 12:06:58.417869 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d7ccf10988fd83d31d42be6f82403067a0cc50313c4d5dcddfb528507efffa4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:58 crc kubenswrapper[4753]: I0129 12:06:58.430881 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x8kzr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d753320a-61c2-4c0e-bd48-96d74b352114\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tgns4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x8kzr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:58 crc kubenswrapper[4753]: I0129 12:06:58.447259 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"703a6b9d-a15d-4ed3-b00b-db7bd5d42c61\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cab3ab80a8fddbc591cd07fdbed800f525bb8a1b5505bf818ebb59cbcce73003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7af8be0836a8ff7d08ff48d9487acdedd4b83127538d071d13b3215400c0f8de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d57a9cede640bdcec325175598b72dd00398436b730248728c7d9afd545b1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T12:06:54Z\\\",\\\"message\\\":\\\"ed a new cert/key pair\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\"\\\\nI0129 12:06:54.190978 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 12:06:54.198044 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 12:06:54.198078 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 12:06:54.198139 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 12:06:54.198147 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 12:06:54.208089 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 12:06:54.208122 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208128 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208132 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 12:06:54.208135 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 12:06:54.208139 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 12:06:54.208143 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 12:06:54.208164 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0129 12:06:54.212380 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769688397\\\\\\\\\\\\\\\" (2026-01-29 12:06:37 +0000 UTC to 2026-02-28 12:06:38 +0000 UTC (now=2026-01-29 12:06:54.212317535 +0000 UTC))\\\\\\\"\\\\nF0129 12:06:54.212498 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e02d977e34888952623f4ced5e073d7bf1ff360f8deb435dc577d2ecf146cb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:33Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:58 crc kubenswrapper[4753]: I0129 12:06:58.460765 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d7ccf10988fd83d31d42be6f82403067a0cc50313c4d5dcddfb528507efffa4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:58 crc kubenswrapper[4753]: I0129 12:06:58.466209 4753 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Jan 29 12:06:58 crc kubenswrapper[4753]: I0129 12:06:58.495798 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:58 crc kubenswrapper[4753]: I0129 12:06:58.637617 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-03 01:55:21.614496295 +0000 UTC Jan 29 12:06:58 crc kubenswrapper[4753]: I0129 12:06:58.642277 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"966eb8e59f82bacf20ebfa1b1d5bf9dc0a9cdb3b430ae83ed9bde106eb3fa521"} Jan 29 12:06:58 crc kubenswrapper[4753]: I0129 12:06:58.643291 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-x8kzr" event={"ID":"d753320a-61c2-4c0e-bd48-96d74b352114","Type":"ContainerStarted","Data":"be34008200679670811dab614711125440652987ee8a43b36cd6c6a7bf950cdf"} Jan 29 12:06:58 crc kubenswrapper[4753]: I0129 12:06:58.698780 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:58 crc kubenswrapper[4753]: I0129 12:06:58.738764 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:06:58 crc kubenswrapper[4753]: E0129 12:06:58.739073 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:07:02.73904482 +0000 UTC m=+36.991126285 (durationBeforeRetry 4s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:06:58 crc kubenswrapper[4753]: I0129 12:06:58.776761 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:58 crc kubenswrapper[4753]: I0129 12:06:58.891719 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 12:06:58 crc kubenswrapper[4753]: I0129 12:06:58.891798 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 12:06:58 crc kubenswrapper[4753]: E0129 12:06:58.891869 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 12:06:58 crc kubenswrapper[4753]: I0129 12:06:58.893945 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 12:06:58 crc kubenswrapper[4753]: I0129 12:06:58.894032 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 12:06:58 crc kubenswrapper[4753]: E0129 12:06:58.894198 4753 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 29 12:06:58 crc kubenswrapper[4753]: E0129 12:06:58.894292 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 12:07:02.894261272 +0000 UTC m=+37.146342727 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 29 12:06:58 crc kubenswrapper[4753]: E0129 12:06:58.894385 4753 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 29 12:06:58 crc kubenswrapper[4753]: E0129 12:06:58.894431 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 12:07:02.894420957 +0000 UTC m=+37.146502412 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 29 12:06:58 crc kubenswrapper[4753]: E0129 12:06:58.898109 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 12:06:58 crc kubenswrapper[4753]: I0129 12:06:58.929400 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-7c24x"] Jan 29 12:06:58 crc kubenswrapper[4753]: I0129 12:06:58.929940 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-5m2jf"] Jan 29 12:06:58 crc kubenswrapper[4753]: I0129 12:06:58.930161 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-5m2jf" Jan 29 12:06:58 crc kubenswrapper[4753]: I0129 12:06:58.930633 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" Jan 29 12:06:58 crc kubenswrapper[4753]: I0129 12:06:58.932051 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:58 crc kubenswrapper[4753]: I0129 12:06:58.932719 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Jan 29 12:06:58 crc kubenswrapper[4753]: I0129 12:06:58.933081 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Jan 29 12:06:58 crc kubenswrapper[4753]: I0129 12:06:58.933323 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Jan 29 12:06:58 crc kubenswrapper[4753]: I0129 12:06:58.933645 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Jan 29 12:06:58 crc kubenswrapper[4753]: I0129 12:06:58.935796 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Jan 29 12:06:58 crc kubenswrapper[4753]: I0129 12:06:58.935840 4753 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Jan 29 12:06:58 crc kubenswrapper[4753]: I0129 12:06:58.935848 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Jan 29 12:06:58 crc kubenswrapper[4753]: I0129 12:06:58.936093 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Jan 29 12:06:58 crc kubenswrapper[4753]: I0129 12:06:58.965458 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b108c546-a8c1-4bf9-814b-bfb5871b3013\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9842d9d6fed8c87bc20835964d3271b0be3117fedc658e6ef028db422d1ad478\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4fd4169036d3a646c9e5e4bfa8d86a92c557c3d8f77608ae5308dd0b8c45517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b4a6bbb431e321e54013c96e3cca1537283cbdf65d89a978780550d603663fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha2
56:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://31bd7db7bddd3e16cb455f7af9fd3a9bca49919c8f1ffb676dec83c543ba7763\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:58 crc kubenswrapper[4753]: I0129 12:06:58.976500 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:58 crc kubenswrapper[4753]: I0129 12:06:58.992896 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"703a6b9d-a15d-4ed3-b00b-db7bd5d42c61\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cab3ab80a8fddbc591cd07fdbed800f525bb8a1b5505bf818ebb59cbcce73003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7af8be0836a8ff7d08ff48d9487acdedd4b83127538d071d13b3215400c0f8de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d57a9cede640bdcec325175598b72dd00398436b730248728c7d9afd545b1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T12:06:54Z\\\",\\\"message\\\":\\\"ed a new cert/key pair\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\"\\\\nI0129 12:06:54.190978 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 12:06:54.198044 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 12:06:54.198078 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 12:06:54.198139 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 12:06:54.198147 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 12:06:54.208089 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 12:06:54.208122 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208128 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208132 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 12:06:54.208135 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 12:06:54.208139 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 12:06:54.208143 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 12:06:54.208164 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0129 12:06:54.212380 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769688397\\\\\\\\\\\\\\\" (2026-01-29 12:06:37 +0000 UTC to 2026-02-28 12:06:38 +0000 UTC (now=2026-01-29 12:06:54.212317535 +0000 UTC))\\\\\\\"\\\\nF0129 12:06:54.212498 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e02d977e34888952623f4ced5e073d7bf1ff360f8deb435dc577d2ecf146cb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:33Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:58 crc kubenswrapper[4753]: I0129 12:06:58.995264 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/b0310995-a7c7-47c3-ae6c-05daaaba92a6-rootfs\") pod \"machine-config-daemon-7c24x\" (UID: \"b0310995-a7c7-47c3-ae6c-05daaaba92a6\") " pod="openshift-machine-config-operator/machine-config-daemon-7c24x" Jan 29 12:06:58 crc kubenswrapper[4753]: I0129 12:06:58.995322 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/b0310995-a7c7-47c3-ae6c-05daaaba92a6-proxy-tls\") pod \"machine-config-daemon-7c24x\" (UID: \"b0310995-a7c7-47c3-ae6c-05daaaba92a6\") " pod="openshift-machine-config-operator/machine-config-daemon-7c24x" Jan 29 12:06:58 crc kubenswrapper[4753]: I0129 12:06:58.995489 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zzwn6\" (UniqueName: 
\"kubernetes.io/projected/0203345d-1e9f-4cfe-bde7-90f87221d1a6-kube-api-access-zzwn6\") pod \"node-resolver-5m2jf\" (UID: \"0203345d-1e9f-4cfe-bde7-90f87221d1a6\") " pod="openshift-dns/node-resolver-5m2jf" Jan 29 12:06:58 crc kubenswrapper[4753]: I0129 12:06:58.995532 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-txdmk\" (UniqueName: \"kubernetes.io/projected/b0310995-a7c7-47c3-ae6c-05daaaba92a6-kube-api-access-txdmk\") pod \"machine-config-daemon-7c24x\" (UID: \"b0310995-a7c7-47c3-ae6c-05daaaba92a6\") " pod="openshift-machine-config-operator/machine-config-daemon-7c24x" Jan 29 12:06:58 crc kubenswrapper[4753]: I0129 12:06:58.995547 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/0203345d-1e9f-4cfe-bde7-90f87221d1a6-hosts-file\") pod \"node-resolver-5m2jf\" (UID: \"0203345d-1e9f-4cfe-bde7-90f87221d1a6\") " pod="openshift-dns/node-resolver-5m2jf" Jan 29 12:06:58 crc kubenswrapper[4753]: I0129 12:06:58.995578 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/b0310995-a7c7-47c3-ae6c-05daaaba92a6-mcd-auth-proxy-config\") pod \"machine-config-daemon-7c24x\" (UID: \"b0310995-a7c7-47c3-ae6c-05daaaba92a6\") " pod="openshift-machine-config-operator/machine-config-daemon-7c24x" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.005624 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.021198 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x8kzr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d753320a-61c2-4c0e-bd48-96d74b352114\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tgns4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x8kzr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: 
connection refused" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.038299 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966eb8e59f82bacf20ebfa1b1d5bf9dc0a9cdb3b430ae83ed9bde106eb3fa521\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea7c92ec02a9cf6ec5fbd8efe22a066dfef9b48f042137964c5dd49578c99079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.096387 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zzwn6\" (UniqueName: \"kubernetes.io/projected/0203345d-1e9f-4cfe-bde7-90f87221d1a6-kube-api-access-zzwn6\") pod \"node-resolver-5m2jf\" (UID: 
\"0203345d-1e9f-4cfe-bde7-90f87221d1a6\") " pod="openshift-dns/node-resolver-5m2jf" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.096434 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-txdmk\" (UniqueName: \"kubernetes.io/projected/b0310995-a7c7-47c3-ae6c-05daaaba92a6-kube-api-access-txdmk\") pod \"machine-config-daemon-7c24x\" (UID: \"b0310995-a7c7-47c3-ae6c-05daaaba92a6\") " pod="openshift-machine-config-operator/machine-config-daemon-7c24x" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.096450 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/0203345d-1e9f-4cfe-bde7-90f87221d1a6-hosts-file\") pod \"node-resolver-5m2jf\" (UID: \"0203345d-1e9f-4cfe-bde7-90f87221d1a6\") " pod="openshift-dns/node-resolver-5m2jf" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.096478 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/b0310995-a7c7-47c3-ae6c-05daaaba92a6-mcd-auth-proxy-config\") pod \"machine-config-daemon-7c24x\" (UID: \"b0310995-a7c7-47c3-ae6c-05daaaba92a6\") " pod="openshift-machine-config-operator/machine-config-daemon-7c24x" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.096504 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/b0310995-a7c7-47c3-ae6c-05daaaba92a6-rootfs\") pod \"machine-config-daemon-7c24x\" (UID: \"b0310995-a7c7-47c3-ae6c-05daaaba92a6\") " pod="openshift-machine-config-operator/machine-config-daemon-7c24x" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.096519 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/b0310995-a7c7-47c3-ae6c-05daaaba92a6-proxy-tls\") pod \"machine-config-daemon-7c24x\" (UID: \"b0310995-a7c7-47c3-ae6c-05daaaba92a6\") " pod="openshift-machine-config-operator/machine-config-daemon-7c24x" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.096750 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/0203345d-1e9f-4cfe-bde7-90f87221d1a6-hosts-file\") pod \"node-resolver-5m2jf\" (UID: \"0203345d-1e9f-4cfe-bde7-90f87221d1a6\") " pod="openshift-dns/node-resolver-5m2jf" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.096863 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/b0310995-a7c7-47c3-ae6c-05daaaba92a6-rootfs\") pod \"machine-config-daemon-7c24x\" (UID: \"b0310995-a7c7-47c3-ae6c-05daaaba92a6\") " pod="openshift-machine-config-operator/machine-config-daemon-7c24x" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.097357 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/b0310995-a7c7-47c3-ae6c-05daaaba92a6-mcd-auth-proxy-config\") pod \"machine-config-daemon-7c24x\" (UID: \"b0310995-a7c7-47c3-ae6c-05daaaba92a6\") " pod="openshift-machine-config-operator/machine-config-daemon-7c24x" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.106183 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/b0310995-a7c7-47c3-ae6c-05daaaba92a6-proxy-tls\") pod \"machine-config-daemon-7c24x\" (UID: 
\"b0310995-a7c7-47c3-ae6c-05daaaba92a6\") " pod="openshift-machine-config-operator/machine-config-daemon-7c24x" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.121570 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0310995-a7c7-47c3-ae6c-05daaaba92a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7c24x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.136537 
4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b108c546-a8c1-4bf9-814b-bfb5871b3013\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9842d9d6fed8c87bc20835964d3271b0be3117fedc658e6ef028db422d1ad478\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4fd4169036d3a646c9e5e4bfa8d86a92c557c3d8f77608ae5308dd0b8c45517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b4a6bbb431e321e54013c96e3cca1537283cbdf65d89a978780550d603663fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":
\\\"cri-o://31bd7db7bddd3e16cb455f7af9fd3a9bca49919c8f1ffb676dec83c543ba7763\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.139469 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-txdmk\" (UniqueName: \"kubernetes.io/projected/b0310995-a7c7-47c3-ae6c-05daaaba92a6-kube-api-access-txdmk\") pod \"machine-config-daemon-7c24x\" (UID: \"b0310995-a7c7-47c3-ae6c-05daaaba92a6\") " pod="openshift-machine-config-operator/machine-config-daemon-7c24x" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.145485 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zzwn6\" (UniqueName: \"kubernetes.io/projected/0203345d-1e9f-4cfe-bde7-90f87221d1a6-kube-api-access-zzwn6\") pod \"node-resolver-5m2jf\" (UID: \"0203345d-1e9f-4cfe-bde7-90f87221d1a6\") " pod="openshift-dns/node-resolver-5m2jf" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.149557 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.167362 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.180535 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d7ccf10988fd83d31d42be6f82403067a0cc50313c4d5dcddfb528507efffa4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.194637 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.202665 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5m2jf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0203345d-1e9f-4cfe-bde7-90f87221d1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zzwn6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5m2jf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.245073 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-5m2jf" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.251805 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" Jan 29 12:06:59 crc kubenswrapper[4753]: W0129 12:06:59.270505 4753 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb0310995_a7c7_47c3_ae6c_05daaaba92a6.slice/crio-f00fa1f1af12b865f8e7a2fccf4f0c6578ec4de991ae2e3db3c4ab2366b741ee WatchSource:0}: Error finding container f00fa1f1af12b865f8e7a2fccf4f0c6578ec4de991ae2e3db3c4ab2366b741ee: Status 404 returned error can't find the container with id f00fa1f1af12b865f8e7a2fccf4f0c6578ec4de991ae2e3db3c4ab2366b741ee Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.586498 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-rnbz9"] Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.586907 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-rnbz9" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.589068 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.589394 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.589905 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-vwcjk"] Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.590803 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-vwcjk" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.592051 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.592193 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.592877 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.593074 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.593204 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.638398 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-24 09:47:21.840666579 +0000 UTC Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.654940 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/b372210b-6e1b-4a80-b379-7c1d570712f3-host-run-k8s-cni-cncf-io\") pod \"multus-rnbz9\" (UID: \"b372210b-6e1b-4a80-b379-7c1d570712f3\") " pod="openshift-multus/multus-rnbz9" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.654990 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/a99ab890-0a5a-4abb-86fb-a3731ff6b2c1-tuning-conf-dir\") pod \"multus-additional-cni-plugins-vwcjk\" (UID: \"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1\") " pod="openshift-multus/multus-additional-cni-plugins-vwcjk" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.655022 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/b372210b-6e1b-4a80-b379-7c1d570712f3-host-var-lib-kubelet\") pod \"multus-rnbz9\" (UID: \"b372210b-6e1b-4a80-b379-7c1d570712f3\") " pod="openshift-multus/multus-rnbz9" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.655053 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/b372210b-6e1b-4a80-b379-7c1d570712f3-multus-daemon-config\") pod \"multus-rnbz9\" (UID: \"b372210b-6e1b-4a80-b379-7c1d570712f3\") " pod="openshift-multus/multus-rnbz9" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.655115 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/a99ab890-0a5a-4abb-86fb-a3731ff6b2c1-cnibin\") pod \"multus-additional-cni-plugins-vwcjk\" (UID: \"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1\") " pod="openshift-multus/multus-additional-cni-plugins-vwcjk" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.655194 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/b372210b-6e1b-4a80-b379-7c1d570712f3-host-var-lib-cni-bin\") 
pod \"multus-rnbz9\" (UID: \"b372210b-6e1b-4a80-b379-7c1d570712f3\") " pod="openshift-multus/multus-rnbz9" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.655212 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/b372210b-6e1b-4a80-b379-7c1d570712f3-multus-conf-dir\") pod \"multus-rnbz9\" (UID: \"b372210b-6e1b-4a80-b379-7c1d570712f3\") " pod="openshift-multus/multus-rnbz9" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.655252 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/a99ab890-0a5a-4abb-86fb-a3731ff6b2c1-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-vwcjk\" (UID: \"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1\") " pod="openshift-multus/multus-additional-cni-plugins-vwcjk" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.655286 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/b372210b-6e1b-4a80-b379-7c1d570712f3-os-release\") pod \"multus-rnbz9\" (UID: \"b372210b-6e1b-4a80-b379-7c1d570712f3\") " pod="openshift-multus/multus-rnbz9" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.655309 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/b372210b-6e1b-4a80-b379-7c1d570712f3-host-run-multus-certs\") pod \"multus-rnbz9\" (UID: \"b372210b-6e1b-4a80-b379-7c1d570712f3\") " pod="openshift-multus/multus-rnbz9" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.655329 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/a99ab890-0a5a-4abb-86fb-a3731ff6b2c1-cni-binary-copy\") pod \"multus-additional-cni-plugins-vwcjk\" (UID: \"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1\") " pod="openshift-multus/multus-additional-cni-plugins-vwcjk" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.655344 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/b372210b-6e1b-4a80-b379-7c1d570712f3-cnibin\") pod \"multus-rnbz9\" (UID: \"b372210b-6e1b-4a80-b379-7c1d570712f3\") " pod="openshift-multus/multus-rnbz9" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.655367 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/b372210b-6e1b-4a80-b379-7c1d570712f3-host-run-netns\") pod \"multus-rnbz9\" (UID: \"b372210b-6e1b-4a80-b379-7c1d570712f3\") " pod="openshift-multus/multus-rnbz9" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.655390 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/b372210b-6e1b-4a80-b379-7c1d570712f3-host-var-lib-cni-multus\") pod \"multus-rnbz9\" (UID: \"b372210b-6e1b-4a80-b379-7c1d570712f3\") " pod="openshift-multus/multus-rnbz9" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.655411 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: 
\"kubernetes.io/host-path/b372210b-6e1b-4a80-b379-7c1d570712f3-etc-kubernetes\") pod \"multus-rnbz9\" (UID: \"b372210b-6e1b-4a80-b379-7c1d570712f3\") " pod="openshift-multus/multus-rnbz9" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.655453 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/a99ab890-0a5a-4abb-86fb-a3731ff6b2c1-os-release\") pod \"multus-additional-cni-plugins-vwcjk\" (UID: \"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1\") " pod="openshift-multus/multus-additional-cni-plugins-vwcjk" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.655565 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/a99ab890-0a5a-4abb-86fb-a3731ff6b2c1-system-cni-dir\") pod \"multus-additional-cni-plugins-vwcjk\" (UID: \"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1\") " pod="openshift-multus/multus-additional-cni-plugins-vwcjk" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.655641 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t6f7b\" (UniqueName: \"kubernetes.io/projected/a99ab890-0a5a-4abb-86fb-a3731ff6b2c1-kube-api-access-t6f7b\") pod \"multus-additional-cni-plugins-vwcjk\" (UID: \"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1\") " pod="openshift-multus/multus-additional-cni-plugins-vwcjk" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.655686 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/b372210b-6e1b-4a80-b379-7c1d570712f3-system-cni-dir\") pod \"multus-rnbz9\" (UID: \"b372210b-6e1b-4a80-b379-7c1d570712f3\") " pod="openshift-multus/multus-rnbz9" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.655710 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/b372210b-6e1b-4a80-b379-7c1d570712f3-cni-binary-copy\") pod \"multus-rnbz9\" (UID: \"b372210b-6e1b-4a80-b379-7c1d570712f3\") " pod="openshift-multus/multus-rnbz9" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.655777 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/b372210b-6e1b-4a80-b379-7c1d570712f3-multus-cni-dir\") pod \"multus-rnbz9\" (UID: \"b372210b-6e1b-4a80-b379-7c1d570712f3\") " pod="openshift-multus/multus-rnbz9" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.655796 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/b372210b-6e1b-4a80-b379-7c1d570712f3-multus-socket-dir-parent\") pod \"multus-rnbz9\" (UID: \"b372210b-6e1b-4a80-b379-7c1d570712f3\") " pod="openshift-multus/multus-rnbz9" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.655822 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/b372210b-6e1b-4a80-b379-7c1d570712f3-hostroot\") pod \"multus-rnbz9\" (UID: \"b372210b-6e1b-4a80-b379-7c1d570712f3\") " pod="openshift-multus/multus-rnbz9" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.655845 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"kube-api-access-gqzp6\" (UniqueName: \"kubernetes.io/projected/b372210b-6e1b-4a80-b379-7c1d570712f3-kube-api-access-gqzp6\") pod \"multus-rnbz9\" (UID: \"b372210b-6e1b-4a80-b379-7c1d570712f3\") " pod="openshift-multus/multus-rnbz9" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.701952 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-x8kzr" event={"ID":"d753320a-61c2-4c0e-bd48-96d74b352114","Type":"ContainerStarted","Data":"1c7a667b7243fc36ff18cb4c017152fff3d5f4eb5e28e306ace44420cd722c5f"} Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.703259 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" event={"ID":"b0310995-a7c7-47c3-ae6c-05daaaba92a6","Type":"ContainerStarted","Data":"f00fa1f1af12b865f8e7a2fccf4f0c6578ec4de991ae2e3db3c4ab2366b741ee"} Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.704540 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-5m2jf" event={"ID":"0203345d-1e9f-4cfe-bde7-90f87221d1a6","Type":"ContainerStarted","Data":"7cdd7a37398faab8d83f441f1566a4505cfa8bb17fbcacf217080704e64a4f15"} Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.756800 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/b372210b-6e1b-4a80-b379-7c1d570712f3-system-cni-dir\") pod \"multus-rnbz9\" (UID: \"b372210b-6e1b-4a80-b379-7c1d570712f3\") " pod="openshift-multus/multus-rnbz9" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.756866 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/b372210b-6e1b-4a80-b379-7c1d570712f3-cni-binary-copy\") pod \"multus-rnbz9\" (UID: \"b372210b-6e1b-4a80-b379-7c1d570712f3\") " pod="openshift-multus/multus-rnbz9" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.756968 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/b372210b-6e1b-4a80-b379-7c1d570712f3-multus-cni-dir\") pod \"multus-rnbz9\" (UID: \"b372210b-6e1b-4a80-b379-7c1d570712f3\") " pod="openshift-multus/multus-rnbz9" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.757058 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/b372210b-6e1b-4a80-b379-7c1d570712f3-multus-socket-dir-parent\") pod \"multus-rnbz9\" (UID: \"b372210b-6e1b-4a80-b379-7c1d570712f3\") " pod="openshift-multus/multus-rnbz9" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.757090 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/b372210b-6e1b-4a80-b379-7c1d570712f3-hostroot\") pod \"multus-rnbz9\" (UID: \"b372210b-6e1b-4a80-b379-7c1d570712f3\") " pod="openshift-multus/multus-rnbz9" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.757107 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gqzp6\" (UniqueName: \"kubernetes.io/projected/b372210b-6e1b-4a80-b379-7c1d570712f3-kube-api-access-gqzp6\") pod \"multus-rnbz9\" (UID: \"b372210b-6e1b-4a80-b379-7c1d570712f3\") " pod="openshift-multus/multus-rnbz9" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.757131 4753 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/b372210b-6e1b-4a80-b379-7c1d570712f3-host-run-k8s-cni-cncf-io\") pod \"multus-rnbz9\" (UID: \"b372210b-6e1b-4a80-b379-7c1d570712f3\") " pod="openshift-multus/multus-rnbz9" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.757146 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/a99ab890-0a5a-4abb-86fb-a3731ff6b2c1-tuning-conf-dir\") pod \"multus-additional-cni-plugins-vwcjk\" (UID: \"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1\") " pod="openshift-multus/multus-additional-cni-plugins-vwcjk" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.757174 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/b372210b-6e1b-4a80-b379-7c1d570712f3-multus-daemon-config\") pod \"multus-rnbz9\" (UID: \"b372210b-6e1b-4a80-b379-7c1d570712f3\") " pod="openshift-multus/multus-rnbz9" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.757193 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/a99ab890-0a5a-4abb-86fb-a3731ff6b2c1-cnibin\") pod \"multus-additional-cni-plugins-vwcjk\" (UID: \"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1\") " pod="openshift-multus/multus-additional-cni-plugins-vwcjk" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.757302 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/b372210b-6e1b-4a80-b379-7c1d570712f3-host-var-lib-kubelet\") pod \"multus-rnbz9\" (UID: \"b372210b-6e1b-4a80-b379-7c1d570712f3\") " pod="openshift-multus/multus-rnbz9" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.757359 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/a99ab890-0a5a-4abb-86fb-a3731ff6b2c1-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-vwcjk\" (UID: \"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1\") " pod="openshift-multus/multus-additional-cni-plugins-vwcjk" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.757408 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/b372210b-6e1b-4a80-b379-7c1d570712f3-host-var-lib-cni-bin\") pod \"multus-rnbz9\" (UID: \"b372210b-6e1b-4a80-b379-7c1d570712f3\") " pod="openshift-multus/multus-rnbz9" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.757430 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/b372210b-6e1b-4a80-b379-7c1d570712f3-multus-conf-dir\") pod \"multus-rnbz9\" (UID: \"b372210b-6e1b-4a80-b379-7c1d570712f3\") " pod="openshift-multus/multus-rnbz9" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.757446 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/b372210b-6e1b-4a80-b379-7c1d570712f3-host-var-lib-kubelet\") pod \"multus-rnbz9\" (UID: \"b372210b-6e1b-4a80-b379-7c1d570712f3\") " pod="openshift-multus/multus-rnbz9" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.757464 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: 
\"kubernetes.io/configmap/a99ab890-0a5a-4abb-86fb-a3731ff6b2c1-cni-binary-copy\") pod \"multus-additional-cni-plugins-vwcjk\" (UID: \"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1\") " pod="openshift-multus/multus-additional-cni-plugins-vwcjk" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.757541 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/b372210b-6e1b-4a80-b379-7c1d570712f3-os-release\") pod \"multus-rnbz9\" (UID: \"b372210b-6e1b-4a80-b379-7c1d570712f3\") " pod="openshift-multus/multus-rnbz9" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.757582 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/b372210b-6e1b-4a80-b379-7c1d570712f3-host-run-multus-certs\") pod \"multus-rnbz9\" (UID: \"b372210b-6e1b-4a80-b379-7c1d570712f3\") " pod="openshift-multus/multus-rnbz9" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.757561 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/a99ab890-0a5a-4abb-86fb-a3731ff6b2c1-cnibin\") pod \"multus-additional-cni-plugins-vwcjk\" (UID: \"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1\") " pod="openshift-multus/multus-additional-cni-plugins-vwcjk" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.757631 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/b372210b-6e1b-4a80-b379-7c1d570712f3-cnibin\") pod \"multus-rnbz9\" (UID: \"b372210b-6e1b-4a80-b379-7c1d570712f3\") " pod="openshift-multus/multus-rnbz9" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.757554 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/b372210b-6e1b-4a80-b379-7c1d570712f3-host-var-lib-cni-bin\") pod \"multus-rnbz9\" (UID: \"b372210b-6e1b-4a80-b379-7c1d570712f3\") " pod="openshift-multus/multus-rnbz9" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.757528 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/b372210b-6e1b-4a80-b379-7c1d570712f3-multus-socket-dir-parent\") pod \"multus-rnbz9\" (UID: \"b372210b-6e1b-4a80-b379-7c1d570712f3\") " pod="openshift-multus/multus-rnbz9" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.757718 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/b372210b-6e1b-4a80-b379-7c1d570712f3-host-run-netns\") pod \"multus-rnbz9\" (UID: \"b372210b-6e1b-4a80-b379-7c1d570712f3\") " pod="openshift-multus/multus-rnbz9" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.757613 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/b372210b-6e1b-4a80-b379-7c1d570712f3-host-run-k8s-cni-cncf-io\") pod \"multus-rnbz9\" (UID: \"b372210b-6e1b-4a80-b379-7c1d570712f3\") " pod="openshift-multus/multus-rnbz9" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.757809 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/b372210b-6e1b-4a80-b379-7c1d570712f3-os-release\") pod \"multus-rnbz9\" (UID: \"b372210b-6e1b-4a80-b379-7c1d570712f3\") " pod="openshift-multus/multus-rnbz9" Jan 29 12:06:59 crc 
kubenswrapper[4753]: I0129 12:06:59.757788 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/b372210b-6e1b-4a80-b379-7c1d570712f3-host-var-lib-cni-multus\") pod \"multus-rnbz9\" (UID: \"b372210b-6e1b-4a80-b379-7c1d570712f3\") " pod="openshift-multus/multus-rnbz9" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.757839 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/b372210b-6e1b-4a80-b379-7c1d570712f3-host-var-lib-cni-multus\") pod \"multus-rnbz9\" (UID: \"b372210b-6e1b-4a80-b379-7c1d570712f3\") " pod="openshift-multus/multus-rnbz9" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.757730 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/b372210b-6e1b-4a80-b379-7c1d570712f3-multus-conf-dir\") pod \"multus-rnbz9\" (UID: \"b372210b-6e1b-4a80-b379-7c1d570712f3\") " pod="openshift-multus/multus-rnbz9" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.757860 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/b372210b-6e1b-4a80-b379-7c1d570712f3-system-cni-dir\") pod \"multus-rnbz9\" (UID: \"b372210b-6e1b-4a80-b379-7c1d570712f3\") " pod="openshift-multus/multus-rnbz9" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.757352 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/b372210b-6e1b-4a80-b379-7c1d570712f3-hostroot\") pod \"multus-rnbz9\" (UID: \"b372210b-6e1b-4a80-b379-7c1d570712f3\") " pod="openshift-multus/multus-rnbz9" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.757907 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/b372210b-6e1b-4a80-b379-7c1d570712f3-cnibin\") pod \"multus-rnbz9\" (UID: \"b372210b-6e1b-4a80-b379-7c1d570712f3\") " pod="openshift-multus/multus-rnbz9" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.757934 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/b372210b-6e1b-4a80-b379-7c1d570712f3-host-run-netns\") pod \"multus-rnbz9\" (UID: \"b372210b-6e1b-4a80-b379-7c1d570712f3\") " pod="openshift-multus/multus-rnbz9" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.757588 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/b372210b-6e1b-4a80-b379-7c1d570712f3-multus-cni-dir\") pod \"multus-rnbz9\" (UID: \"b372210b-6e1b-4a80-b379-7c1d570712f3\") " pod="openshift-multus/multus-rnbz9" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.757938 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/b372210b-6e1b-4a80-b379-7c1d570712f3-host-run-multus-certs\") pod \"multus-rnbz9\" (UID: \"b372210b-6e1b-4a80-b379-7c1d570712f3\") " pod="openshift-multus/multus-rnbz9" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.757979 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/b372210b-6e1b-4a80-b379-7c1d570712f3-etc-kubernetes\") pod \"multus-rnbz9\" (UID: \"b372210b-6e1b-4a80-b379-7c1d570712f3\") " 
pod="openshift-multus/multus-rnbz9" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.758029 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/b372210b-6e1b-4a80-b379-7c1d570712f3-etc-kubernetes\") pod \"multus-rnbz9\" (UID: \"b372210b-6e1b-4a80-b379-7c1d570712f3\") " pod="openshift-multus/multus-rnbz9" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.758069 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/a99ab890-0a5a-4abb-86fb-a3731ff6b2c1-os-release\") pod \"multus-additional-cni-plugins-vwcjk\" (UID: \"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1\") " pod="openshift-multus/multus-additional-cni-plugins-vwcjk" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.758188 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/a99ab890-0a5a-4abb-86fb-a3731ff6b2c1-os-release\") pod \"multus-additional-cni-plugins-vwcjk\" (UID: \"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1\") " pod="openshift-multus/multus-additional-cni-plugins-vwcjk" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.758297 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/a99ab890-0a5a-4abb-86fb-a3731ff6b2c1-system-cni-dir\") pod \"multus-additional-cni-plugins-vwcjk\" (UID: \"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1\") " pod="openshift-multus/multus-additional-cni-plugins-vwcjk" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.758323 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t6f7b\" (UniqueName: \"kubernetes.io/projected/a99ab890-0a5a-4abb-86fb-a3731ff6b2c1-kube-api-access-t6f7b\") pod \"multus-additional-cni-plugins-vwcjk\" (UID: \"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1\") " pod="openshift-multus/multus-additional-cni-plugins-vwcjk" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.758406 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/a99ab890-0a5a-4abb-86fb-a3731ff6b2c1-tuning-conf-dir\") pod \"multus-additional-cni-plugins-vwcjk\" (UID: \"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1\") " pod="openshift-multus/multus-additional-cni-plugins-vwcjk" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.758426 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/a99ab890-0a5a-4abb-86fb-a3731ff6b2c1-system-cni-dir\") pod \"multus-additional-cni-plugins-vwcjk\" (UID: \"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1\") " pod="openshift-multus/multus-additional-cni-plugins-vwcjk" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.760678 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/b372210b-6e1b-4a80-b379-7c1d570712f3-cni-binary-copy\") pod \"multus-rnbz9\" (UID: \"b372210b-6e1b-4a80-b379-7c1d570712f3\") " pod="openshift-multus/multus-rnbz9" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.760687 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/b372210b-6e1b-4a80-b379-7c1d570712f3-multus-daemon-config\") pod \"multus-rnbz9\" (UID: \"b372210b-6e1b-4a80-b379-7c1d570712f3\") " pod="openshift-multus/multus-rnbz9" 
Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.761487 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/a99ab890-0a5a-4abb-86fb-a3731ff6b2c1-cni-binary-copy\") pod \"multus-additional-cni-plugins-vwcjk\" (UID: \"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1\") " pod="openshift-multus/multus-additional-cni-plugins-vwcjk" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.761847 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/a99ab890-0a5a-4abb-86fb-a3731ff6b2c1-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-vwcjk\" (UID: \"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1\") " pod="openshift-multus/multus-additional-cni-plugins-vwcjk" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.889633 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 12:06:59 crc kubenswrapper[4753]: E0129 12:06:59.889795 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.943307 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.961641 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.962067 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 12:06:59 crc kubenswrapper[4753]: E0129 12:06:59.962632 4753 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 29 12:06:59 crc kubenswrapper[4753]: E0129 12:06:59.962789 4753 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 29 12:06:59 crc kubenswrapper[4753]: E0129 12:06:59.962880 4753 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 12:06:59 crc kubenswrapper[4753]: E0129 12:06:59.963063 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-29 12:07:03.963046697 +0000 UTC m=+38.215128142 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 12:06:59 crc kubenswrapper[4753]: E0129 12:06:59.962961 4753 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 29 12:06:59 crc kubenswrapper[4753]: E0129 12:06:59.963545 4753 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 29 12:06:59 crc kubenswrapper[4753]: E0129 12:06:59.963664 4753 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 12:06:59 crc kubenswrapper[4753]: E0129 12:06:59.963765 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-29 12:07:03.963755678 +0000 UTC m=+38.215837133 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.973628 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5m2jf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0203345d-1e9f-4cfe-bde7-90f87221d1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zzwn6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5m2jf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.981997 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gqzp6\" (UniqueName: \"kubernetes.io/projected/b372210b-6e1b-4a80-b379-7c1d570712f3-kube-api-access-gqzp6\") pod \"multus-rnbz9\" (UID: \"b372210b-6e1b-4a80-b379-7c1d570712f3\") " pod="openshift-multus/multus-rnbz9" Jan 29 12:06:59 crc kubenswrapper[4753]: I0129 12:06:59.988567 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t6f7b\" (UniqueName: \"kubernetes.io/projected/a99ab890-0a5a-4abb-86fb-a3731ff6b2c1-kube-api-access-t6f7b\") pod \"multus-additional-cni-plugins-vwcjk\" (UID: \"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1\") " pod="openshift-multus/multus-additional-cni-plugins-vwcjk" Jan 29 12:07:00 crc kubenswrapper[4753]: I0129 12:07:00.185384 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"703a6b9d-a15d-4ed3-b00b-db7bd5d42c61\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cab3ab80a8fddbc591cd07fdbed800f525bb8a1b5505bf818ebb59cbcce73003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7af8be0836a8ff7d08ff48d9487acdedd4b83127538d071d13b3215400c0f8de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d57a9cede640bdcec325175598b72dd00398436b730248728c7d9afd545b1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T12:06:54Z\\\",\\\"message\\\":\\\"ed a new cert/key pair\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\"\\\\nI0129 12:06:54.190978 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 12:06:54.198044 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 12:06:54.198078 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 12:06:54.198139 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 12:06:54.198147 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 12:06:54.208089 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 12:06:54.208122 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208128 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208132 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 12:06:54.208135 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 12:06:54.208139 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 12:06:54.208143 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 12:06:54.208164 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0129 12:06:54.212380 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769688397\\\\\\\\\\\\\\\" (2026-01-29 12:06:37 +0000 UTC to 2026-02-28 12:06:38 +0000 UTC (now=2026-01-29 12:06:54.212317535 +0000 UTC))\\\\\\\"\\\\nF0129 12:06:54.212498 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e02d977e34888952623f4ced5e073d7bf1ff360f8deb435dc577d2ecf146cb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:33Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:00Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:00 crc kubenswrapper[4753]: I0129 12:07:00.205297 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:00Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:00 crc kubenswrapper[4753]: I0129 12:07:00.304691 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-rnbz9" Jan 29 12:07:00 crc kubenswrapper[4753]: I0129 12:07:00.305302 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-vwcjk" Jan 29 12:07:00 crc kubenswrapper[4753]: W0129 12:07:00.335513 4753 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda99ab890_0a5a_4abb_86fb_a3731ff6b2c1.slice/crio-a84148dfcbeb903f85818ee666b548b0fa539df767d5f23948b401a59b6d653a WatchSource:0}: Error finding container a84148dfcbeb903f85818ee666b548b0fa539df767d5f23948b401a59b6d653a: Status 404 returned error can't find the container with id a84148dfcbeb903f85818ee666b548b0fa539df767d5f23948b401a59b6d653a Jan 29 12:07:00 crc kubenswrapper[4753]: I0129 12:07:00.336836 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x8kzr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d753320a-61c2-4c0e-bd48-96d74b352114\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tgns4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x8kzr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:00Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:00 crc kubenswrapper[4753]: I0129 12:07:00.356912 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966eb8e59f82bacf20ebfa1b1d5bf9dc0a9cdb3b430ae83ed9bde106eb3fa521\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea7c92ec02a9cf6ec5fbd8efe22a066dfef9b48f042137964c5dd49578c99079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:00Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:00 crc kubenswrapper[4753]: I0129 12:07:00.378423 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0310995-a7c7-47c3-ae6c-05daaaba92a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7c24x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:00Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:00 crc kubenswrapper[4753]: I0129 12:07:00.500068 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rnbz9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b372210b-6e1b-4a80-b379-7c1d570712f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqzp6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rnbz9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:00Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:00 crc kubenswrapper[4753]: I0129 12:07:00.507314 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-nzkvz"] Jan 29 12:07:00 crc kubenswrapper[4753]: I0129 12:07:00.508319 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:00 crc kubenswrapper[4753]: I0129 12:07:00.513704 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Jan 29 12:07:00 crc kubenswrapper[4753]: I0129 12:07:00.513738 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Jan 29 12:07:00 crc kubenswrapper[4753]: I0129 12:07:00.513911 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Jan 29 12:07:00 crc kubenswrapper[4753]: I0129 12:07:00.514120 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Jan 29 12:07:00 crc kubenswrapper[4753]: I0129 12:07:00.514415 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Jan 29 12:07:00 crc kubenswrapper[4753]: I0129 12:07:00.514655 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Jan 29 12:07:00 crc kubenswrapper[4753]: I0129 12:07:00.514801 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Jan 29 12:07:00 crc kubenswrapper[4753]: I0129 12:07:00.534062 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b108c546-a8c1-4bf9-814b-bfb5871b3013\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9842d9d6fed8c87bc20835964d3271b0be3117fedc658e6ef028db422d1ad478\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4fd4169036d3a646c9e5e4bfa8d86a92c557c3d8f77608ae5308dd0b8c45517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b4a6bbb431e321e54013c96e3cca1537283cbdf65d89a978780550d603663fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://31bd7db7bddd3e16cb455f7af9fd3a9bca49919c8f1ffb676dec83c543ba7763\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:00Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:00 crc kubenswrapper[4753]: I0129 12:07:00.543696 4753 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 12:07:00 crc kubenswrapper[4753]: I0129 12:07:00.544733 4753 scope.go:117] "RemoveContainer" containerID="34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a" Jan 29 12:07:00 crc kubenswrapper[4753]: E0129 12:07:00.545032 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Jan 29 12:07:00 crc kubenswrapper[4753]: I0129 12:07:00.550325 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:00Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:00 crc kubenswrapper[4753]: I0129 12:07:00.569079 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:00Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:00 crc kubenswrapper[4753]: I0129 12:07:00.647359 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-10 12:02:20.698158781 +0000 UTC Jan 29 12:07:00 crc kubenswrapper[4753]: I0129 12:07:00.789820 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-host-kubelet\") pod \"ovnkube-node-nzkvz\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:00 crc kubenswrapper[4753]: I0129 12:07:00.789900 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-run-ovn\") pod \"ovnkube-node-nzkvz\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:00 crc kubenswrapper[4753]: I0129 12:07:00.789933 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/80bec2ab-0a88-4818-9339-760edda3b07e-ovn-node-metrics-cert\") pod \"ovnkube-node-nzkvz\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:00 crc kubenswrapper[4753]: I0129 12:07:00.789966 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-log-socket\") pod \"ovnkube-node-nzkvz\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:00 crc kubenswrapper[4753]: I0129 12:07:00.789988 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-run-systemd\") pod \"ovnkube-node-nzkvz\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:00 crc kubenswrapper[4753]: I0129 12:07:00.790057 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/80bec2ab-0a88-4818-9339-760edda3b07e-ovnkube-config\") pod \"ovnkube-node-nzkvz\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:00 crc kubenswrapper[4753]: I0129 12:07:00.790592 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-host-cni-bin\") pod \"ovnkube-node-nzkvz\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:00 crc kubenswrapper[4753]: I0129 12:07:00.790626 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/80bec2ab-0a88-4818-9339-760edda3b07e-env-overrides\") pod \"ovnkube-node-nzkvz\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:00 crc kubenswrapper[4753]: I0129 12:07:00.790752 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-nzkvz\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:00 crc kubenswrapper[4753]: I0129 12:07:00.790778 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-etc-openvswitch\") pod \"ovnkube-node-nzkvz\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:00 crc kubenswrapper[4753]: I0129 12:07:00.790807 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-node-log\") pod \"ovnkube-node-nzkvz\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:00 crc kubenswrapper[4753]: I0129 12:07:00.790832 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-host-run-netns\") pod \"ovnkube-node-nzkvz\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:00 crc kubenswrapper[4753]: I0129 12:07:00.790852 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-host-run-ovn-kubernetes\") pod \"ovnkube-node-nzkvz\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:00 crc kubenswrapper[4753]: I0129 12:07:00.790892 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-host-slash\") pod \"ovnkube-node-nzkvz\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:00 crc kubenswrapper[4753]: I0129 12:07:00.807368 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vwcjk" 
event={"ID":"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1","Type":"ContainerStarted","Data":"a84148dfcbeb903f85818ee666b548b0fa539df767d5f23948b401a59b6d653a"} Jan 29 12:07:00 crc kubenswrapper[4753]: I0129 12:07:00.808363 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-run-openvswitch\") pod \"ovnkube-node-nzkvz\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:00 crc kubenswrapper[4753]: I0129 12:07:00.808502 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-host-cni-netd\") pod \"ovnkube-node-nzkvz\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:00 crc kubenswrapper[4753]: I0129 12:07:00.808529 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/80bec2ab-0a88-4818-9339-760edda3b07e-ovnkube-script-lib\") pod \"ovnkube-node-nzkvz\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:00 crc kubenswrapper[4753]: I0129 12:07:00.809114 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-systemd-units\") pod \"ovnkube-node-nzkvz\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:00 crc kubenswrapper[4753]: I0129 12:07:00.809162 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-var-lib-openvswitch\") pod \"ovnkube-node-nzkvz\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:00 crc kubenswrapper[4753]: I0129 12:07:00.809241 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nkh6d\" (UniqueName: \"kubernetes.io/projected/80bec2ab-0a88-4818-9339-760edda3b07e-kube-api-access-nkh6d\") pod \"ovnkube-node-nzkvz\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:00 crc kubenswrapper[4753]: I0129 12:07:00.817664 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d7ccf10988fd83d31d42be6f82403067a0cc50313c4d5dcddfb528507efffa4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:00Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:00 crc kubenswrapper[4753]: I0129 12:07:00.819933 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" event={"ID":"b0310995-a7c7-47c3-ae6c-05daaaba92a6","Type":"ContainerStarted","Data":"e56ac830346c0f85227bcebc9b76c2af8646767d4678934985d7b1f5846dcb82"} Jan 29 12:07:00 crc kubenswrapper[4753]: I0129 12:07:00.820000 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" event={"ID":"b0310995-a7c7-47c3-ae6c-05daaaba92a6","Type":"ContainerStarted","Data":"8391a68625fc3d8fdbc4f16b0b7b25e359798d08ce65ad8f482722910873418a"} Jan 29 12:07:00 crc kubenswrapper[4753]: I0129 12:07:00.828346 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"7962d88e183df5c4d91f5767f2007068ea2fdfd5c31d9d6ed609c2ab9ca26999"} Jan 29 12:07:00 crc kubenswrapper[4753]: I0129 12:07:00.829214 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-5m2jf" event={"ID":"0203345d-1e9f-4cfe-bde7-90f87221d1a6","Type":"ContainerStarted","Data":"fdf3d79084d3e17b1f8cb0b285fd425280f34724c6a441104a2ec52b5217bfdf"} Jan 29 12:07:00 crc kubenswrapper[4753]: I0129 12:07:00.835946 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-multus/multus-rnbz9" event={"ID":"b372210b-6e1b-4a80-b379-7c1d570712f3","Type":"ContainerStarted","Data":"6407577e3d62f1bd1d6fce561989dc3742731426ad2a82a0beb2cf487b167bfe"} Jan 29 12:07:00 crc kubenswrapper[4753]: I0129 12:07:00.847958 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"703a6b9d-a15d-4ed3-b00b-db7bd5d42c61\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cab3ab80a8fddbc591cd07fdbed800f525bb8a1b5505bf818ebb59cbcce73003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7af8be0836a8ff7d08ff48d9487acdedd4b83127538d071d13b3215400c0f8de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d57a9cede640bdcec325175598b72dd00398436b730248728c7d9afd545b1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc4
78274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T12:06:54Z\\\",\\\"message\\\":\\\"ed a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\"\\\\nI0129 12:06:54.190978 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 12:06:54.198044 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 12:06:54.198078 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 12:06:54.198139 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 12:06:54.198147 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 12:06:54.208089 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 12:06:54.208122 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208128 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208132 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 12:06:54.208135 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 12:06:54.208139 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 12:06:54.208143 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 12:06:54.208164 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0129 12:06:54.212380 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769688397\\\\\\\\\\\\\\\" (2026-01-29 12:06:37 +0000 UTC to 2026-02-28 12:06:38 +0000 UTC (now=2026-01-29 12:06:54.212317535 +0000 UTC))\\\\\\\"\\\\nF0129 12:06:54.212498 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e02d977e34888952623f4ced5e073d7bf1ff360f8deb435dc577d2ecf146cb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:33Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:00Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:00 crc kubenswrapper[4753]: I0129 12:07:00.877751 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:00Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:00 crc kubenswrapper[4753]: I0129 12:07:00.888072 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 12:07:00 crc kubenswrapper[4753]: I0129 12:07:00.888359 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 12:07:00 crc kubenswrapper[4753]: E0129 12:07:00.888534 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 12:07:00 crc kubenswrapper[4753]: E0129 12:07:00.889022 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 12:07:00 crc kubenswrapper[4753]: I0129 12:07:00.895693 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x8kzr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d753320a-61c2-4c0e-bd48-96d74b352114\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c7a667b7243fc36ff18cb4c017152fff3d5f4eb5e28e306ace44420cd722c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tgns4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x8kzr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:00Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:00 crc kubenswrapper[4753]: I0129 12:07:00.917329 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vwcjk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\
\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vwcjk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:00Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:00 crc kubenswrapper[4753]: I0129 12:07:00.935127 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b108c546-a8c1-4bf9-814b-bfb5871b3013\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9842d9d6fed8c87bc20835964d3271b0be3117fedc658e6ef028db422d1ad478\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4fd4169036d3a646c9e5e4bfa8d86a92c557c3d8f77608ae5308dd0b8c45517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}
]},{\\\"containerID\\\":\\\"cri-o://1b4a6bbb431e321e54013c96e3cca1537283cbdf65d89a978780550d603663fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://31bd7db7bddd3e16cb455f7af9fd3a9bca49919c8f1ffb676dec83c543ba7763\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:00Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.253900 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-host-cni-netd\") pod \"ovnkube-node-nzkvz\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.254030 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/80bec2ab-0a88-4818-9339-760edda3b07e-ovnkube-script-lib\") pod \"ovnkube-node-nzkvz\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.254057 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-systemd-units\") pod \"ovnkube-node-nzkvz\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.254109 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-var-lib-openvswitch\") pod \"ovnkube-node-nzkvz\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.254126 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nkh6d\" (UniqueName: \"kubernetes.io/projected/80bec2ab-0a88-4818-9339-760edda3b07e-kube-api-access-nkh6d\") pod \"ovnkube-node-nzkvz\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.254185 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-host-kubelet\") pod \"ovnkube-node-nzkvz\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.254216 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-run-ovn\") pod \"ovnkube-node-nzkvz\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.254262 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/80bec2ab-0a88-4818-9339-760edda3b07e-ovn-node-metrics-cert\") pod \"ovnkube-node-nzkvz\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.254286 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-log-socket\") pod \"ovnkube-node-nzkvz\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.254322 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-run-systemd\") pod \"ovnkube-node-nzkvz\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.254349 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/80bec2ab-0a88-4818-9339-760edda3b07e-ovnkube-config\") pod \"ovnkube-node-nzkvz\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.254425 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-host-cni-bin\") pod \"ovnkube-node-nzkvz\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 
12:07:01.254481 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/80bec2ab-0a88-4818-9339-760edda3b07e-env-overrides\") pod \"ovnkube-node-nzkvz\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.254548 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-nzkvz\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.254573 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-etc-openvswitch\") pod \"ovnkube-node-nzkvz\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.254608 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-node-log\") pod \"ovnkube-node-nzkvz\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.254669 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-host-run-netns\") pod \"ovnkube-node-nzkvz\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.254687 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-host-run-ovn-kubernetes\") pod \"ovnkube-node-nzkvz\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.254723 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-host-slash\") pod \"ovnkube-node-nzkvz\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.254765 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-run-openvswitch\") pod \"ovnkube-node-nzkvz\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.257309 4753 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.258537 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-host-cni-netd\") pod \"ovnkube-node-nzkvz\" (UID: 
\"80bec2ab-0a88-4818-9339-760edda3b07e\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.259918 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/80bec2ab-0a88-4818-9339-760edda3b07e-ovnkube-script-lib\") pod \"ovnkube-node-nzkvz\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.259964 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-host-cni-bin\") pod \"ovnkube-node-nzkvz\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.260539 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-systemd-units\") pod \"ovnkube-node-nzkvz\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.260543 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/80bec2ab-0a88-4818-9339-760edda3b07e-env-overrides\") pod \"ovnkube-node-nzkvz\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.261042 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-host-run-ovn-kubernetes\") pod \"ovnkube-node-nzkvz\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.261062 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-host-slash\") pod \"ovnkube-node-nzkvz\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.262496 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-log-socket\") pod \"ovnkube-node-nzkvz\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.262528 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-run-systemd\") pod \"ovnkube-node-nzkvz\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.262843 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-run-openvswitch\") pod \"ovnkube-node-nzkvz\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.263198 4753 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.263271 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.263284 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.263310 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-etc-openvswitch\") pod \"ovnkube-node-nzkvz\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.263368 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-nzkvz\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.263475 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-node-log\") pod \"ovnkube-node-nzkvz\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.263518 4753 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.263589 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/80bec2ab-0a88-4818-9339-760edda3b07e-ovnkube-config\") pod \"ovnkube-node-nzkvz\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.263682 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-host-run-netns\") pod \"ovnkube-node-nzkvz\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.263716 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-var-lib-openvswitch\") pod \"ovnkube-node-nzkvz\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.263747 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-host-kubelet\") pod \"ovnkube-node-nzkvz\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.263972 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-run-ovn\") pod \"ovnkube-node-nzkvz\" (UID: 
\"80bec2ab-0a88-4818-9339-760edda3b07e\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.284437 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/80bec2ab-0a88-4818-9339-760edda3b07e-ovn-node-metrics-cert\") pod \"ovnkube-node-nzkvz\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.287271 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:01Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.305751 4753 kubelet_node_status.go:115] "Node was previously registered" node="crc" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.306364 4753 kubelet_node_status.go:79] "Successfully registered node" node="crc" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.308159 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nkh6d\" (UniqueName: \"kubernetes.io/projected/80bec2ab-0a88-4818-9339-760edda3b07e-kube-api-access-nkh6d\") pod \"ovnkube-node-nzkvz\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.308264 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.308315 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.308330 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.308350 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.308422 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:01Z","lastTransitionTime":"2026-01-29T12:07:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.353828 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:01Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:01 crc kubenswrapper[4753]: E0129 12:07:01.355179 4753 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"message\\\":\\\"kubelet has no disk 
pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeByt
es\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-a
rt-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7536a030-b66d-4df9-a1ec-9890f7ad99e7\\\",\\\"systemUUID\\\":\\\"c
e89c1ce-259c-4b78-b348-3ef96afb6944\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:01Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.362392 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.362448 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.362488 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.362546 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.362563 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:01Z","lastTransitionTime":"2026-01-29T12:07:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:01 crc kubenswrapper[4753]: E0129 12:07:01.380426 4753 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7536a030-b66d-4df9-a1ec-9890f7ad99e7\\\",\\\"systemUUID\\\":\\\"ce89c1ce-259c-4b78-b348-3ef96afb6944\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:01Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.385368 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966eb8e59f82bacf20ebfa1b1d5bf9dc0a9cdb3b430ae83ed9bde106eb3fa521\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea7c92ec02a9cf6ec5fbd8efe22a066dfef9b48f042137964c5dd49578c99079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:01Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.385779 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.385804 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.385812 4753 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.385827 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.385838 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:01Z","lastTransitionTime":"2026-01-29T12:07:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.416240 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0310995-a7c7-47c3-ae6c-05daaaba92a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7c24x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:01Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.458550 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.473792 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rnbz9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b372210b-6e1b-4a80-b379-7c1d570712f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqzp6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\
"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rnbz9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:01Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:01 crc kubenswrapper[4753]: E0129 12:07:01.479817 4753 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7536a030-b66d-4df9-a1ec-9890f7ad99e7\\\",\\\"systemUUID\\\":\\\"ce89c1ce-259c-4b78-b348-3ef96afb6944\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:01Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.485050 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.485102 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.485121 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.485140 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.485151 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:01Z","lastTransitionTime":"2026-01-29T12:07:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.496970 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d7ccf10988fd83d31d42be6f82403067a0cc50313c4d5dcddfb528507efffa4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:01Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:01 crc kubenswrapper[4753]: E0129 12:07:01.502566 4753 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status 
\"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae
669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7536a030-b66d-4df9-a1ec-9890f7ad99e7\\\",\\\"systemUUID\\\":\\\"ce89c1ce-259c-4b78-b348-3ef96afb6944\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:01Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.509631 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.509688 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.509710 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.509727 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.509738 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:01Z","lastTransitionTime":"2026-01-29T12:07:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.524516 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80bec2ab-0a88-4818-9339-760edda3b07e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imag
eID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"
mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.12
6.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-nzkvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:01Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:01 crc kubenswrapper[4753]: E0129 12:07:01.527752 4753 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7536a030-b66d-4df9-a1ec-9890f7ad99e7\\\",\\\"systemUUID\\\":\\\"ce89c1ce-259c-4b78-b348-3ef96afb6944\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:01Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:01 crc kubenswrapper[4753]: E0129 12:07:01.528076 4753 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.530045 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.530106 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.530122 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.530142 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.530168 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:01Z","lastTransitionTime":"2026-01-29T12:07:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.554763 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:01Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.578671 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5m2jf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0203345d-1e9f-4cfe-bde7-90f87221d1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zzwn6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5m2jf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:01Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.608874 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7962d88e183df5c4d91f5767f2007068ea2fdfd5c31d9d6ed609c2ab9ca26999\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:01Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.633563 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.633631 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.633650 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.633683 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.633708 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:01Z","lastTransitionTime":"2026-01-29T12:07:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady 
message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.647985 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-18 12:26:54.15013728 +0000 UTC Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.664306 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5m2jf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0203345d-1e9f-4cfe-bde7-90f87221d1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fdf3d79084d3e17b1f8cb0b285fd425280f34724c6a441104a2ec52b5217bfdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zzwn6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5m2jf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:01Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.721065 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"703a6b9d-a15d-4ed3-b00b-db7bd5d42c61\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cab3ab80a8fddbc591cd07fdbed800f525bb8a1b5505bf818ebb59cbcce73003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7af8be0836a8ff7d08ff48d9487acdedd4b83127538d071d13b3215400c0f8de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d57a9cede640bdcec325175598b72dd00398436b730248728c7d9afd545b1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T12:06:54Z\\\",\\\"message\\\":\\\"ed a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\"\\\\nI0129 12:06:54.190978 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 12:06:54.198044 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 12:06:54.198078 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 12:06:54.198139 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 12:06:54.198147 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 12:06:54.208089 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 12:06:54.208122 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208128 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208132 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 12:06:54.208135 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 12:06:54.208139 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 12:06:54.208143 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 12:06:54.208164 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0129 12:06:54.212380 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769688397\\\\\\\\\\\\\\\" (2026-01-29 12:06:37 +0000 UTC to 2026-02-28 12:06:38 +0000 UTC (now=2026-01-29 12:06:54.212317535 +0000 UTC))\\\\\\\"\\\\nF0129 12:06:54.212498 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e02d977e34888952623f4ced5e073d7bf1ff360f8deb435dc577d2ecf146cb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:33Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:01Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.736249 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.736309 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.736321 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.736346 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.736359 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:01Z","lastTransitionTime":"2026-01-29T12:07:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.741585 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:01Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.838383 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x8kzr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d753320a-61c2-4c0e-bd48-96d74b352114\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c7a667b7243fc36ff18cb4c017152fff3d5f4eb5e28e306ace44420cd722c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tgns4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x8kzr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:01Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.838735 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.838759 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.838768 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.838781 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.838790 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:01Z","lastTransitionTime":"2026-01-29T12:07:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.843078 4753 generic.go:334] "Generic (PLEG): container finished" podID="80bec2ab-0a88-4818-9339-760edda3b07e" containerID="87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c" exitCode=0 Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.843136 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" event={"ID":"80bec2ab-0a88-4818-9339-760edda3b07e","Type":"ContainerDied","Data":"87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c"} Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.843196 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" event={"ID":"80bec2ab-0a88-4818-9339-760edda3b07e","Type":"ContainerStarted","Data":"97c76242144b0e1c85e0be3bcdd7a4d811921f97855ef3ddd033bad24283bf21"} Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.844376 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-rnbz9" event={"ID":"b372210b-6e1b-4a80-b379-7c1d570712f3","Type":"ContainerStarted","Data":"346c36936603f4535d5ad3e6b60e451b05b4d376e2531c813f0c900c92caf5fd"} Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.846206 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vwcjk" event={"ID":"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1","Type":"ContainerStarted","Data":"bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464"} Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.892081 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 12:07:01 crc kubenswrapper[4753]: E0129 12:07:01.892286 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.991312 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.991349 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.991358 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.991374 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.991386 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:01Z","lastTransitionTime":"2026-01-29T12:07:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:01 crc kubenswrapper[4753]: I0129 12:07:01.994771 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vwcjk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\
\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastSt
ate\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vwcjk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:01Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:02 crc kubenswrapper[4753]: I0129 12:07:02.011119 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b108c546-a8c1-4bf9-814b-bfb5871b3013\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9842d9d6fed8c87bc20835964d3271b0be3117fedc658e6ef028db422d1ad478\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4fd4169036d3a646c9e5e4bfa8d86a92c557c3d8f77608ae5308dd0b8c45517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"started
At\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b4a6bbb431e321e54013c96e3cca1537283cbdf65d89a978780550d603663fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://31bd7db7bddd3e16cb455f7af9fd3a9bca49919c8f1ffb676dec83c543ba7763\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:02Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:02 crc kubenswrapper[4753]: I0129 12:07:02.029517 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:02Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:02 crc kubenswrapper[4753]: I0129 12:07:02.052412 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:02Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:02 crc kubenswrapper[4753]: I0129 12:07:02.073953 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966eb8e59f82bacf20ebfa1b1d5bf9dc0a9cdb3b430ae83ed9bde106eb3fa521\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea7c92ec02a9cf6ec5fbd8efe22a066dfef9b48f042137964c5dd49578c99079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:02Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:02 crc kubenswrapper[4753]: I0129 12:07:02.091650 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0310995-a7c7-47c3-ae6c-05daaaba92a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e56ac830346c0f85227bcebc9b76c2af8646767d4678934985d7b1f5846dcb82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8391a68625fc3d8fdbc4f16b0b7b25e359798d08ce65ad8f482722910873418a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernete
s.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7c24x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:02Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:02 crc kubenswrapper[4753]: I0129 12:07:02.095850 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:02 crc kubenswrapper[4753]: I0129 12:07:02.095884 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:02 crc kubenswrapper[4753]: I0129 12:07:02.095896 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:02 crc kubenswrapper[4753]: I0129 12:07:02.095919 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:02 crc kubenswrapper[4753]: I0129 12:07:02.095936 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:02Z","lastTransitionTime":"2026-01-29T12:07:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:02 crc kubenswrapper[4753]: I0129 12:07:02.479880 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:02 crc kubenswrapper[4753]: I0129 12:07:02.479937 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:02 crc kubenswrapper[4753]: I0129 12:07:02.480016 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:02 crc kubenswrapper[4753]: I0129 12:07:02.480047 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:02 crc kubenswrapper[4753]: I0129 12:07:02.483440 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:02Z","lastTransitionTime":"2026-01-29T12:07:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:02 crc kubenswrapper[4753]: I0129 12:07:02.546765 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rnbz9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b372210b-6e1b-4a80-b379-7c1d570712f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqzp6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\
\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rnbz9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:02Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:02 crc kubenswrapper[4753]: I0129 12:07:02.570726 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d7ccf10988fd83d31d42be6f82403067a0cc50313c4d5dcddfb528507efffa4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:02Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:02 crc kubenswrapper[4753]: I0129 12:07:02.589313 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:02 crc kubenswrapper[4753]: I0129 12:07:02.589365 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:02 crc kubenswrapper[4753]: I0129 12:07:02.589384 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:02 crc kubenswrapper[4753]: I0129 12:07:02.589414 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:02 crc kubenswrapper[4753]: I0129 12:07:02.589433 4753 setters.go:603] "Node 
became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:02Z","lastTransitionTime":"2026-01-29T12:07:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:02 crc kubenswrapper[4753]: I0129 12:07:02.622712 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80bec2ab-0a88-4818-9339-760edda3b07e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-nzkvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:02Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:02 crc kubenswrapper[4753]: I0129 12:07:02.649302 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-13 18:14:15.717982397 +0000 UTC Jan 29 12:07:02 crc kubenswrapper[4753]: I0129 12:07:02.661365 4753 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b108c546-a8c1-4bf9-814b-bfb5871b3013\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9842d9d6fed8c87bc20835964d3271b0be3117fedc658e6ef028db422d1ad478\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4fd4169036d3a646c9e5e4bfa8d86a92c557c3d8f77608ae5308dd0b8c45517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b4a6bbb431e321e54013c96e3cca1537283cbdf65d89a978780550d603663fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://31bd7db7bddd3e16cb455f7af9fd3a9bca49919c8f1ffb676d
ec83c543ba7763\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:02Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:02 crc kubenswrapper[4753]: I0129 12:07:02.693639 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:02 crc kubenswrapper[4753]: I0129 12:07:02.693680 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:02 crc kubenswrapper[4753]: I0129 12:07:02.693690 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:02 crc kubenswrapper[4753]: I0129 12:07:02.693705 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:02 crc kubenswrapper[4753]: I0129 12:07:02.693714 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:02Z","lastTransitionTime":"2026-01-29T12:07:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:02 crc kubenswrapper[4753]: I0129 12:07:02.698292 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:02Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:02 crc kubenswrapper[4753]: I0129 12:07:02.725300 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:02Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:02 crc kubenswrapper[4753]: I0129 12:07:02.765799 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966eb8e59f82bacf20ebfa1b1d5bf9dc0a9cdb3b430ae83ed9bde106eb3fa521\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea7c92ec02a9cf6ec5fbd8efe22a066dfef9b48f042137964c5dd49578c99079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imag
eID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:02Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:02 crc kubenswrapper[4753]: I0129 12:07:02.780540 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:07:02 crc kubenswrapper[4753]: E0129 12:07:02.780798 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:07:10.780758735 +0000 UTC m=+45.032840190 (durationBeforeRetry 8s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:07:02 crc kubenswrapper[4753]: I0129 12:07:02.783936 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0310995-a7c7-47c3-ae6c-05daaaba92a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e56ac830346c0f85227bcebc9b76c2af8646767d4678934985d7b1f5846dcb82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8391a68625fc3d8fdbc4f16b0b7b25e359798d08ce65ad8f482722910873418a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\
\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7c24x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:02Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:02 crc kubenswrapper[4753]: I0129 12:07:02.799753 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:02 crc kubenswrapper[4753]: I0129 12:07:02.799818 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:02 crc kubenswrapper[4753]: I0129 12:07:02.799833 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:02 crc kubenswrapper[4753]: I0129 12:07:02.799862 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:02 crc kubenswrapper[4753]: I0129 12:07:02.799877 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:02Z","lastTransitionTime":"2026-01-29T12:07:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:02 crc kubenswrapper[4753]: I0129 12:07:02.843968 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rnbz9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b372210b-6e1b-4a80-b379-7c1d570712f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://346c36936603f4535d5ad3e6b60e451b05b4d376e2531c813f0c900c92caf5fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqzp6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rnbz9\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:02Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:02 crc kubenswrapper[4753]: I0129 12:07:02.887953 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 12:07:02 crc kubenswrapper[4753]: E0129 12:07:02.888104 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 12:07:02 crc kubenswrapper[4753]: I0129 12:07:02.888174 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 12:07:02 crc kubenswrapper[4753]: E0129 12:07:02.888248 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 12:07:02 crc kubenswrapper[4753]: I0129 12:07:02.900997 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80bec2ab-0a88-4818-9339-760edda3b07e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-nzkvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:02Z 
is after 2025-08-24T17:21:41Z" Jan 29 12:07:02 crc kubenswrapper[4753]: I0129 12:07:02.903953 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:02 crc kubenswrapper[4753]: I0129 12:07:02.904000 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:02 crc kubenswrapper[4753]: I0129 12:07:02.904013 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:02 crc kubenswrapper[4753]: I0129 12:07:02.904031 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:02 crc kubenswrapper[4753]: I0129 12:07:02.904042 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:02Z","lastTransitionTime":"2026-01-29T12:07:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:02 crc kubenswrapper[4753]: I0129 12:07:02.983367 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 12:07:02 crc kubenswrapper[4753]: I0129 12:07:02.983486 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 12:07:02 crc kubenswrapper[4753]: E0129 12:07:02.983618 4753 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 29 12:07:02 crc kubenswrapper[4753]: E0129 12:07:02.983709 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 12:07:10.983682941 +0000 UTC m=+45.235764396 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 29 12:07:02 crc kubenswrapper[4753]: E0129 12:07:02.984978 4753 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 29 12:07:02 crc kubenswrapper[4753]: E0129 12:07:02.985053 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 12:07:10.985032331 +0000 UTC m=+45.237113786 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 29 12:07:03 crc kubenswrapper[4753]: I0129 12:07:03.004455 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d7ccf10988fd83d31d42be6f82403067a0cc50313c4d5dcddfb528507efffa4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-01-29T12:07:03Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:03 crc kubenswrapper[4753]: I0129 12:07:03.006313 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:03 crc kubenswrapper[4753]: I0129 12:07:03.006369 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:03 crc kubenswrapper[4753]: I0129 12:07:03.006382 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:03 crc kubenswrapper[4753]: I0129 12:07:03.006402 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:03 crc kubenswrapper[4753]: I0129 12:07:03.006414 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:03Z","lastTransitionTime":"2026-01-29T12:07:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:03 crc kubenswrapper[4753]: I0129 12:07:03.031084 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7962d88e183df5c4d91f5767f2007068ea2fdfd5c31d9d6ed609c2ab9ca26999\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:03Z is after 
2025-08-24T17:21:41Z" Jan 29 12:07:03 crc kubenswrapper[4753]: I0129 12:07:03.077160 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5m2jf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0203345d-1e9f-4cfe-bde7-90f87221d1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fdf3d79084d3e17b1f8cb0b285fd425280f34724c6a441104a2ec52b5217bfdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zzwn6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5m2jf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:03Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:03 crc kubenswrapper[4753]: I0129 12:07:03.148652 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:03 crc kubenswrapper[4753]: I0129 12:07:03.148704 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:03 crc kubenswrapper[4753]: I0129 12:07:03.148715 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:03 crc kubenswrapper[4753]: I0129 12:07:03.148736 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:03 crc kubenswrapper[4753]: I0129 12:07:03.148745 4753 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:03Z","lastTransitionTime":"2026-01-29T12:07:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:03 crc kubenswrapper[4753]: I0129 12:07:03.252481 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:03 crc kubenswrapper[4753]: I0129 12:07:03.252538 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:03 crc kubenswrapper[4753]: I0129 12:07:03.252582 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:03 crc kubenswrapper[4753]: I0129 12:07:03.252610 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:03 crc kubenswrapper[4753]: I0129 12:07:03.252641 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:03Z","lastTransitionTime":"2026-01-29T12:07:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:03 crc kubenswrapper[4753]: I0129 12:07:03.367466 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:03 crc kubenswrapper[4753]: I0129 12:07:03.367519 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:03 crc kubenswrapper[4753]: I0129 12:07:03.367531 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:03 crc kubenswrapper[4753]: I0129 12:07:03.367554 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:03 crc kubenswrapper[4753]: I0129 12:07:03.367565 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:03Z","lastTransitionTime":"2026-01-29T12:07:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:03 crc kubenswrapper[4753]: I0129 12:07:03.383004 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"703a6b9d-a15d-4ed3-b00b-db7bd5d42c61\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cab3ab80a8fddbc591cd07fdbed800f525bb8a1b5505bf818ebb59cbcce73003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7af8be0836a8ff7d08ff48d9487acdedd4b83127538d071d13b3215400c0f8de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d57a9cede640bdcec325175598b72dd00398436b730248728c7d9afd545b1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T12:06:54Z\\\",\\\"message\\\":\\\"ed a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\"\\\\nI0129 12:06:54.190978 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 12:06:54.198044 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 12:06:54.198078 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 12:06:54.198139 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 12:06:54.198147 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 12:06:54.208089 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 12:06:54.208122 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208128 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208132 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 12:06:54.208135 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 12:06:54.208139 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 12:06:54.208143 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 12:06:54.208164 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0129 12:06:54.212380 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769688397\\\\\\\\\\\\\\\" (2026-01-29 12:06:37 +0000 UTC to 2026-02-28 12:06:38 +0000 UTC (now=2026-01-29 12:06:54.212317535 +0000 UTC))\\\\\\\"\\\\nF0129 12:06:54.212498 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e02d977e34888952623f4ced5e073d7bf1ff360f8deb435dc577d2ecf146cb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:33Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:03Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:03 crc kubenswrapper[4753]: I0129 12:07:03.471928 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:03 crc kubenswrapper[4753]: I0129 12:07:03.471980 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:03 crc kubenswrapper[4753]: I0129 12:07:03.472002 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:03 crc kubenswrapper[4753]: I0129 12:07:03.472031 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:03 crc kubenswrapper[4753]: I0129 12:07:03.472043 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:03Z","lastTransitionTime":"2026-01-29T12:07:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:03 crc kubenswrapper[4753]: I0129 12:07:03.507066 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:03Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:03 crc kubenswrapper[4753]: I0129 12:07:03.650710 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-19 00:37:32.18712364 +0000 UTC Jan 29 12:07:03 crc kubenswrapper[4753]: I0129 12:07:03.699409 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:03 crc kubenswrapper[4753]: I0129 12:07:03.699442 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:03 crc kubenswrapper[4753]: I0129 12:07:03.699452 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:03 crc kubenswrapper[4753]: I0129 12:07:03.699467 4753 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:03 crc kubenswrapper[4753]: I0129 12:07:03.699476 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:03Z","lastTransitionTime":"2026-01-29T12:07:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:03 crc kubenswrapper[4753]: I0129 12:07:03.703405 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x8kzr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d753320a-61c2-4c0e-bd48-96d74b352114\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c7a667b7243fc36ff18cb4c017152fff3d5f4eb5e28e306ace44420cd722c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tgns4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x8kzr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:03Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:04 crc kubenswrapper[4753]: I0129 12:07:04.041072 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: 
\"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 12:07:04 crc kubenswrapper[4753]: I0129 12:07:04.041147 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 12:07:04 crc kubenswrapper[4753]: E0129 12:07:04.041587 4753 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 29 12:07:04 crc kubenswrapper[4753]: E0129 12:07:04.041627 4753 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 29 12:07:04 crc kubenswrapper[4753]: E0129 12:07:04.041669 4753 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 12:07:04 crc kubenswrapper[4753]: E0129 12:07:04.041920 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-29 12:07:12.041849066 +0000 UTC m=+46.293930521 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 12:07:04 crc kubenswrapper[4753]: E0129 12:07:04.041587 4753 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 29 12:07:04 crc kubenswrapper[4753]: E0129 12:07:04.041966 4753 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 29 12:07:04 crc kubenswrapper[4753]: E0129 12:07:04.041990 4753 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 12:07:04 crc kubenswrapper[4753]: E0129 12:07:04.042103 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. 
No retries permitted until 2026-01-29 12:07:12.042018221 +0000 UTC m=+46.294099676 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 12:07:04 crc kubenswrapper[4753]: I0129 12:07:04.043202 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 12:07:04 crc kubenswrapper[4753]: E0129 12:07:04.043475 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 12:07:04 crc kubenswrapper[4753]: I0129 12:07:04.048365 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:04 crc kubenswrapper[4753]: I0129 12:07:04.048437 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:04 crc kubenswrapper[4753]: I0129 12:07:04.048475 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:04 crc kubenswrapper[4753]: I0129 12:07:04.048495 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:04 crc kubenswrapper[4753]: I0129 12:07:04.048508 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:04Z","lastTransitionTime":"2026-01-29T12:07:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:04 crc kubenswrapper[4753]: I0129 12:07:04.072197 4753 generic.go:334] "Generic (PLEG): container finished" podID="a99ab890-0a5a-4abb-86fb-a3731ff6b2c1" containerID="bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464" exitCode=0 Jan 29 12:07:04 crc kubenswrapper[4753]: I0129 12:07:04.072459 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vwcjk" event={"ID":"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1","Type":"ContainerDied","Data":"bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464"} Jan 29 12:07:04 crc kubenswrapper[4753]: I0129 12:07:04.105548 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" event={"ID":"80bec2ab-0a88-4818-9339-760edda3b07e","Type":"ContainerStarted","Data":"05d1e99596530f467ecdae51aec8db191ac045cd85e121fc9835e2f3688f6ae7"} Jan 29 12:07:04 crc kubenswrapper[4753]: I0129 12:07:04.105661 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" event={"ID":"80bec2ab-0a88-4818-9339-760edda3b07e","Type":"ContainerStarted","Data":"91c1df8b6763320498420463a3841f0bfa4fcf753c355f549c3c1e93bf5206a8"} Jan 29 12:07:04 crc kubenswrapper[4753]: I0129 12:07:04.105899 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 12:07:04 crc kubenswrapper[4753]: E0129 12:07:04.106374 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 12:07:04 crc kubenswrapper[4753]: I0129 12:07:04.107627 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vwcjk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":
true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.i
o/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vwcjk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:04Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:04 crc kubenswrapper[4753]: I0129 12:07:04.140363 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7962d88e183df5c4d91f5767f2007068ea2fdfd5c31d9d6ed609c2ab9ca26999\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:04Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:04 crc kubenswrapper[4753]: I0129 12:07:04.161193 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:04 crc kubenswrapper[4753]: I0129 12:07:04.161247 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:04 crc kubenswrapper[4753]: I0129 12:07:04.161258 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:04 crc kubenswrapper[4753]: I0129 12:07:04.161276 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:04 crc kubenswrapper[4753]: I0129 12:07:04.161287 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:04Z","lastTransitionTime":"2026-01-29T12:07:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:04 crc kubenswrapper[4753]: I0129 12:07:04.162311 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5m2jf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0203345d-1e9f-4cfe-bde7-90f87221d1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fdf3d79084d3e17b1f8cb0b285fd425280f34724c6a441104a2ec52b5217bfdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zzwn6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"
startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5m2jf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:04Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:04 crc kubenswrapper[4753]: I0129 12:07:04.298488 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:04 crc kubenswrapper[4753]: I0129 12:07:04.298547 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:04 crc kubenswrapper[4753]: I0129 12:07:04.298561 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:04 crc kubenswrapper[4753]: I0129 12:07:04.298586 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:04 crc kubenswrapper[4753]: I0129 12:07:04.298611 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:04Z","lastTransitionTime":"2026-01-29T12:07:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:04 crc kubenswrapper[4753]: I0129 12:07:04.299518 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"703a6b9d-a15d-4ed3-b00b-db7bd5d42c61\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cab3ab80a8fddbc591cd07fdbed800f525bb8a1b5505bf818ebb59cbcce73003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7af8be0836a8ff7d08ff48d9487acdedd4b83127538d071d13b3215400c0f8de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d57a9cede640bdcec325175598b72dd00398436b730248728c7d9afd545b1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T12:06:54Z\\\",\\\"message\\\":\\\"ed a new cert/key pair\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\"\\\\nI0129 12:06:54.190978 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 12:06:54.198044 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 12:06:54.198078 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 12:06:54.198139 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 12:06:54.198147 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 12:06:54.208089 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 12:06:54.208122 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208128 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208132 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 12:06:54.208135 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 12:06:54.208139 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 12:06:54.208143 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 12:06:54.208164 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0129 12:06:54.212380 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769688397\\\\\\\\\\\\\\\" (2026-01-29 12:06:37 +0000 UTC to 2026-02-28 12:06:38 +0000 UTC (now=2026-01-29 12:06:54.212317535 +0000 UTC))\\\\\\\"\\\\nF0129 12:06:54.212498 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e02d977e34888952623f4ced5e073d7bf1ff360f8deb435dc577d2ecf146cb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:33Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:04Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:04 crc kubenswrapper[4753]: I0129 12:07:04.650538 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:04Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:04 crc kubenswrapper[4753]: I0129 12:07:04.653195 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:04 crc kubenswrapper[4753]: I0129 12:07:04.653258 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:04 crc kubenswrapper[4753]: I0129 12:07:04.653287 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:04 crc kubenswrapper[4753]: I0129 12:07:04.653320 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:04 crc kubenswrapper[4753]: I0129 12:07:04.653357 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:04Z","lastTransitionTime":"2026-01-29T12:07:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:04 crc kubenswrapper[4753]: I0129 12:07:04.658150 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-12 15:16:10.649878294 +0000 UTC Jan 29 12:07:04 crc kubenswrapper[4753]: I0129 12:07:04.673496 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x8kzr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d753320a-61c2-4c0e-bd48-96d74b352114\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c7a667b7243fc36ff18cb4c017152fff3d5f4eb5e28e306ace44420cd722c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tgns4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x8kzr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:04Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:04 crc kubenswrapper[4753]: I0129 12:07:04.698849 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vwcjk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c85
7df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay
.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vwcjk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:04Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:04 crc kubenswrapper[4753]: I0129 12:07:04.718647 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b108c546-a8c1-4bf9-814b-bfb5871b3013\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9842d9d6fed8c87bc20835964d3271b0be3117fedc658e6ef028db422d1ad478\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4fd4169036d3a646c9e5e4bfa8d86a92c557c3d8f77608ae5308dd0b8c45517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":
{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b4a6bbb431e321e54013c96e3cca1537283cbdf65d89a978780550d603663fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://31bd7db7bddd3e16cb455f7af9fd3a9bca49919c8f1ffb676dec83c543ba7763\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:04Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:04 crc kubenswrapper[4753]: I0129 12:07:04.738875 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with 
unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:04Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:04 crc kubenswrapper[4753]: I0129 12:07:04.756152 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:04Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:04 crc kubenswrapper[4753]: I0129 12:07:04.782802 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966eb8e59f82bacf20ebfa1b1d5bf9dc0a9cdb3b430ae83ed9bde106eb3fa521\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea7c92ec02a9cf6ec5fbd8efe22a066dfef9b48f042137964c5dd49578c99079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:04Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:04 crc kubenswrapper[4753]: I0129 12:07:04.785496 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:04 crc kubenswrapper[4753]: I0129 12:07:04.785562 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:04 crc kubenswrapper[4753]: I0129 12:07:04.785577 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:04 crc kubenswrapper[4753]: I0129 12:07:04.785599 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:04 crc kubenswrapper[4753]: I0129 12:07:04.785612 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:04Z","lastTransitionTime":"2026-01-29T12:07:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:04 crc kubenswrapper[4753]: I0129 12:07:04.798860 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0310995-a7c7-47c3-ae6c-05daaaba92a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e56ac830346c0f85227bcebc9b76c2af8646767d4678934985d7b1f5846dcb82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8391a68625fc3d8fdbc4f16b0b7b25e359798d08ce65ad8f482722910873418a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7c24x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:04Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:04 crc kubenswrapper[4753]: I0129 12:07:04.817651 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rnbz9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b372210b-6e1b-4a80-b379-7c1d570712f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://346c36936603f4535d5ad3e6b60e451b05b4d376e2531c813f0c900c92caf5fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqzp6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\
\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rnbz9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:04Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:04 crc kubenswrapper[4753]: I0129 12:07:04.882401 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80bec2ab-0a88-4818-9339-760edda3b07e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-nzkvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:04Z 
is after 2025-08-24T17:21:41Z" Jan 29 12:07:04 crc kubenswrapper[4753]: I0129 12:07:04.887691 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 12:07:04 crc kubenswrapper[4753]: E0129 12:07:04.887968 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 12:07:04 crc kubenswrapper[4753]: I0129 12:07:04.888831 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:04 crc kubenswrapper[4753]: I0129 12:07:04.888860 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:04 crc kubenswrapper[4753]: I0129 12:07:04.888870 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:04 crc kubenswrapper[4753]: I0129 12:07:04.888891 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:04 crc kubenswrapper[4753]: I0129 12:07:04.888907 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:04Z","lastTransitionTime":"2026-01-29T12:07:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:04 crc kubenswrapper[4753]: I0129 12:07:04.905188 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d7ccf10988fd83d31d42be6f82403067a0cc50313c4d5dcddfb528507efffa4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:04Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:05 crc kubenswrapper[4753]: I0129 12:07:05.135152 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:05 crc kubenswrapper[4753]: I0129 12:07:05.135202 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:05 crc kubenswrapper[4753]: I0129 12:07:05.135215 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:05 crc kubenswrapper[4753]: I0129 12:07:05.135265 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:05 crc kubenswrapper[4753]: I0129 12:07:05.135281 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:05Z","lastTransitionTime":"2026-01-29T12:07:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:05 crc kubenswrapper[4753]: I0129 12:07:05.170785 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" event={"ID":"80bec2ab-0a88-4818-9339-760edda3b07e","Type":"ContainerStarted","Data":"0f15016e29f1fc14f51b4d01e772071333fd8a885cacc1804b3a03dbcfc93836"} Jan 29 12:07:05 crc kubenswrapper[4753]: I0129 12:07:05.171159 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" event={"ID":"80bec2ab-0a88-4818-9339-760edda3b07e","Type":"ContainerStarted","Data":"1e6a7d8df812546f17b3e026005c5062086e97e3fa364b3fa22cefec025e4eba"} Jan 29 12:07:05 crc kubenswrapper[4753]: I0129 12:07:05.241148 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:05 crc kubenswrapper[4753]: I0129 12:07:05.241201 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:05 crc kubenswrapper[4753]: I0129 12:07:05.241260 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:05 crc kubenswrapper[4753]: I0129 12:07:05.241293 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:05 crc kubenswrapper[4753]: I0129 12:07:05.241309 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:05Z","lastTransitionTime":"2026-01-29T12:07:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:05 crc kubenswrapper[4753]: I0129 12:07:05.350079 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:05 crc kubenswrapper[4753]: I0129 12:07:05.350149 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:05 crc kubenswrapper[4753]: I0129 12:07:05.350162 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:05 crc kubenswrapper[4753]: I0129 12:07:05.350190 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:05 crc kubenswrapper[4753]: I0129 12:07:05.350261 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:05Z","lastTransitionTime":"2026-01-29T12:07:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:05 crc kubenswrapper[4753]: I0129 12:07:05.479868 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:05 crc kubenswrapper[4753]: I0129 12:07:05.479921 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:05 crc kubenswrapper[4753]: I0129 12:07:05.479936 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:05 crc kubenswrapper[4753]: I0129 12:07:05.479962 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:05 crc kubenswrapper[4753]: I0129 12:07:05.479974 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:05Z","lastTransitionTime":"2026-01-29T12:07:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:05 crc kubenswrapper[4753]: I0129 12:07:05.583056 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:05 crc kubenswrapper[4753]: I0129 12:07:05.583100 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:05 crc kubenswrapper[4753]: I0129 12:07:05.583126 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:05 crc kubenswrapper[4753]: I0129 12:07:05.583143 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:05 crc kubenswrapper[4753]: I0129 12:07:05.583153 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:05Z","lastTransitionTime":"2026-01-29T12:07:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:05 crc kubenswrapper[4753]: I0129 12:07:05.658658 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-22 21:56:38.556241935 +0000 UTC Jan 29 12:07:05 crc kubenswrapper[4753]: I0129 12:07:05.996558 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 12:07:05 crc kubenswrapper[4753]: E0129 12:07:05.996809 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 12:07:06 crc kubenswrapper[4753]: I0129 12:07:06.003901 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:06 crc kubenswrapper[4753]: I0129 12:07:06.003998 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:06 crc kubenswrapper[4753]: I0129 12:07:06.004018 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:06 crc kubenswrapper[4753]: I0129 12:07:06.004044 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:06 crc kubenswrapper[4753]: I0129 12:07:06.004058 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:06Z","lastTransitionTime":"2026-01-29T12:07:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:06 crc kubenswrapper[4753]: I0129 12:07:06.019343 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 12:07:06 crc kubenswrapper[4753]: E0129 12:07:06.030799 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 12:07:06 crc kubenswrapper[4753]: I0129 12:07:06.108416 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:06 crc kubenswrapper[4753]: I0129 12:07:06.108467 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:06 crc kubenswrapper[4753]: I0129 12:07:06.108478 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:06 crc kubenswrapper[4753]: I0129 12:07:06.108500 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:06 crc kubenswrapper[4753]: I0129 12:07:06.108511 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:06Z","lastTransitionTime":"2026-01-29T12:07:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:06 crc kubenswrapper[4753]: I0129 12:07:06.189845 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vwcjk" event={"ID":"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1","Type":"ContainerStarted","Data":"d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691"} Jan 29 12:07:06 crc kubenswrapper[4753]: I0129 12:07:06.197161 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" event={"ID":"80bec2ab-0a88-4818-9339-760edda3b07e","Type":"ContainerStarted","Data":"b0ec2a21ec299b5eed363668f39a270a8b03ea79b9de3dd23bda0c30579c3de3"} Jan 29 12:07:06 crc kubenswrapper[4753]: I0129 12:07:06.376567 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:06 crc kubenswrapper[4753]: I0129 12:07:06.376618 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:06 crc kubenswrapper[4753]: I0129 12:07:06.376629 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:06 crc kubenswrapper[4753]: I0129 12:07:06.376645 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:06 crc kubenswrapper[4753]: I0129 12:07:06.376656 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:06Z","lastTransitionTime":"2026-01-29T12:07:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:06 crc kubenswrapper[4753]: I0129 12:07:06.481294 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:06 crc kubenswrapper[4753]: I0129 12:07:06.481352 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:06 crc kubenswrapper[4753]: I0129 12:07:06.481363 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:06 crc kubenswrapper[4753]: I0129 12:07:06.481396 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:06 crc kubenswrapper[4753]: I0129 12:07:06.481415 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:06Z","lastTransitionTime":"2026-01-29T12:07:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:06 crc kubenswrapper[4753]: I0129 12:07:06.585645 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:06 crc kubenswrapper[4753]: I0129 12:07:06.585677 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:06 crc kubenswrapper[4753]: I0129 12:07:06.585684 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:06 crc kubenswrapper[4753]: I0129 12:07:06.585698 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:06 crc kubenswrapper[4753]: I0129 12:07:06.585716 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:06Z","lastTransitionTime":"2026-01-29T12:07:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:06 crc kubenswrapper[4753]: I0129 12:07:06.629301 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7962d88e183df5c4d91f5767f2007068ea2fdfd5c31d9d6ed609c2ab9ca26999\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:06Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:06 crc 
kubenswrapper[4753]: I0129 12:07:06.658985 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-22 12:56:00.701468333 +0000 UTC Jan 29 12:07:06 crc kubenswrapper[4753]: I0129 12:07:06.688030 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:06 crc kubenswrapper[4753]: I0129 12:07:06.688085 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:06 crc kubenswrapper[4753]: I0129 12:07:06.688109 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:06 crc kubenswrapper[4753]: I0129 12:07:06.688146 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:06 crc kubenswrapper[4753]: I0129 12:07:06.688167 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:06Z","lastTransitionTime":"2026-01-29T12:07:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:06 crc kubenswrapper[4753]: I0129 12:07:06.790487 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5m2jf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0203345d-1e9f-4cfe-bde7-90f87221d1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fdf3d79084d3e17b1f8cb0b285fd425280f34724c6a441104a2ec52b5217bfdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zzwn6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\
",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5m2jf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:06Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:06 crc kubenswrapper[4753]: I0129 12:07:06.791324 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:06 crc kubenswrapper[4753]: I0129 12:07:06.791372 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:06 crc kubenswrapper[4753]: I0129 12:07:06.791384 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:06 crc kubenswrapper[4753]: I0129 12:07:06.791403 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:06 crc kubenswrapper[4753]: I0129 12:07:06.791419 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:06Z","lastTransitionTime":"2026-01-29T12:07:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:07 crc kubenswrapper[4753]: I0129 12:07:06.905496 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x8kzr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d753320a-61c2-4c0e-bd48-96d74b352114\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c7a667b7243fc36ff18cb4c017152fff3d5f4eb5e28e306ace44420cd722c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tgns4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x8kzr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:06Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:07 crc kubenswrapper[4753]: I0129 12:07:07.134817 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 12:07:07 crc kubenswrapper[4753]: E0129 12:07:07.134986 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 12:07:07 crc kubenswrapper[4753]: I0129 12:07:07.138074 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:07 crc kubenswrapper[4753]: I0129 12:07:07.138125 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:07 crc kubenswrapper[4753]: I0129 12:07:07.138136 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:07 crc kubenswrapper[4753]: I0129 12:07:07.138156 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:07 crc kubenswrapper[4753]: I0129 12:07:07.138167 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:07Z","lastTransitionTime":"2026-01-29T12:07:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:07 crc kubenswrapper[4753]: I0129 12:07:07.157168 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vwcjk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64
b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vwcjk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:07Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:07 crc kubenswrapper[4753]: I0129 12:07:07.249331 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:07 crc kubenswrapper[4753]: I0129 12:07:07.249388 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:07 crc kubenswrapper[4753]: I0129 12:07:07.249399 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:07 crc kubenswrapper[4753]: I0129 12:07:07.249416 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:07 crc kubenswrapper[4753]: I0129 12:07:07.249426 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:07Z","lastTransitionTime":"2026-01-29T12:07:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:07 crc kubenswrapper[4753]: I0129 12:07:07.253775 4753 transport.go:147] "Certificate rotation detected, shutting down client connections to start using new credentials" Jan 29 12:07:07 crc kubenswrapper[4753]: I0129 12:07:07.279064 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" event={"ID":"80bec2ab-0a88-4818-9339-760edda3b07e","Type":"ContainerStarted","Data":"f71859413676dab4a435d43c11ad7cba315a12de744c3c8b161273f6ad36580c"} Jan 29 12:07:07 crc kubenswrapper[4753]: I0129 12:07:07.351618 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:07 crc kubenswrapper[4753]: I0129 12:07:07.351666 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:07 crc kubenswrapper[4753]: I0129 12:07:07.351675 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:07 crc kubenswrapper[4753]: I0129 12:07:07.351688 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:07 crc kubenswrapper[4753]: I0129 12:07:07.351698 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:07Z","lastTransitionTime":"2026-01-29T12:07:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:07 crc kubenswrapper[4753]: I0129 12:07:07.454646 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:07 crc kubenswrapper[4753]: I0129 12:07:07.454706 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:07 crc kubenswrapper[4753]: I0129 12:07:07.454723 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:07 crc kubenswrapper[4753]: I0129 12:07:07.454744 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:07 crc kubenswrapper[4753]: I0129 12:07:07.454754 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:07Z","lastTransitionTime":"2026-01-29T12:07:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:07 crc kubenswrapper[4753]: I0129 12:07:07.751633 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-26 20:19:37.484029938 +0000 UTC Jan 29 12:07:07 crc kubenswrapper[4753]: I0129 12:07:07.808171 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:07 crc kubenswrapper[4753]: I0129 12:07:07.808353 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:07 crc kubenswrapper[4753]: I0129 12:07:07.808370 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:07 crc kubenswrapper[4753]: I0129 12:07:07.808964 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:07 crc kubenswrapper[4753]: I0129 12:07:07.809020 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:07Z","lastTransitionTime":"2026-01-29T12:07:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:07 crc kubenswrapper[4753]: I0129 12:07:07.887753 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 12:07:07 crc kubenswrapper[4753]: E0129 12:07:07.887949 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 12:07:07 crc kubenswrapper[4753]: I0129 12:07:07.888344 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 12:07:07 crc kubenswrapper[4753]: E0129 12:07:07.888415 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.015351 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.015393 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.015404 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.015423 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.015434 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:08Z","lastTransitionTime":"2026-01-29T12:07:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.119565 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.119631 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.119644 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.119678 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.119689 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:08Z","lastTransitionTime":"2026-01-29T12:07:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.296266 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.296330 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.296341 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.296361 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.296370 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:08Z","lastTransitionTime":"2026-01-29T12:07:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.302405 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"703a6b9d-a15d-4ed3-b00b-db7bd5d42c61\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cab3ab80a8fddbc591cd07fdbed800f525bb8a1b5505bf818ebb59cbcce73003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7af8be0836a8ff7d08ff48d9487acdedd4b83127538d071d13b3215400c0f8de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d57a9cede640bdcec325175598b72dd00398436b730248728c7d9afd545b1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T12:06:54Z\\\",\\\"message\\\":\\\"ed a new cert/key pair\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\"\\\\nI0129 12:06:54.190978 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 12:06:54.198044 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 12:06:54.198078 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 12:06:54.198139 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 12:06:54.198147 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 12:06:54.208089 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 12:06:54.208122 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208128 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208132 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 12:06:54.208135 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 12:06:54.208139 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 12:06:54.208143 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 12:06:54.208164 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0129 12:06:54.212380 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769688397\\\\\\\\\\\\\\\" (2026-01-29 12:06:37 +0000 UTC to 2026-02-28 12:06:38 +0000 UTC (now=2026-01-29 12:06:54.212317535 +0000 UTC))\\\\\\\"\\\\nF0129 12:06:54.212498 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e02d977e34888952623f4ced5e073d7bf1ff360f8deb435dc577d2ecf146cb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:33Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:08Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.324842 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:08Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.345861 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:08Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.421550 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.421595 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.421605 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.421619 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.421628 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:08Z","lastTransitionTime":"2026-01-29T12:07:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.423615 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966eb8e59f82bacf20ebfa1b1d5bf9dc0a9cdb3b430ae83ed9bde106eb3fa521\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea7c92ec02a9cf6ec5fbd8efe22a066dfef9b48f042137964c5dd49578c99079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:08Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.437579 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0310995-a7c7-47c3-ae6c-05daaaba92a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e56ac830346c0f85227bcebc9b76c2af8646767d4678934985d7b1f5846dcb82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8391a68625fc3d8fdbc4f16b0b7b25e359798d08ce65ad8f482722910873418a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7c24x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:08Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.484383 4753 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-rnbz9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b372210b-6e1b-4a80-b379-7c1d570712f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://346c36936603f4535d5ad3e6b60e451b05b4d376e2531c813f0c900c92caf5fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqzp6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-rnbz9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:08Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.504671 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b108c546-a8c1-4bf9-814b-bfb5871b3013\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9842d9d6fed8c87bc20835964d3271b0be3117fedc658e6ef028db422d1ad478\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4fd4169036d3a646c9e5e4bfa8d86a92c557c3d8f77608ae5308dd0b8c45517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b4a6bbb431e321e54013c96e3cca1537283cbdf65d89a978780550d603663fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"k
ube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://31bd7db7bddd3e16cb455f7af9fd3a9bca49919c8f1ffb676dec83c543ba7763\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:08Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.524502 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.524576 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.524608 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.524627 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.524636 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:08Z","lastTransitionTime":"2026-01-29T12:07:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.530833 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:08Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.554461 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d7ccf10988fd83d31d42be6f82403067a0cc50313c4d5dcddfb528507efffa4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:08Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.588975 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80bec2ab-0a88-4818-9339-760edda3b07e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-nzkvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:08Z 
is after 2025-08-24T17:21:41Z" Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.605340 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d7ccf10988fd83d31d42be6f82403067a0cc50313c4d5dcddfb528507efffa4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:08Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.627221 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.627291 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.627305 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.627327 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.627205 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"80bec2ab-0a88-4818-9339-760edda3b07e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-nzkvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:08Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.627341 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:08Z","lastTransitionTime":"2026-01-29T12:07:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.646372 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7962d88e183df5c4d91f5767f2007068ea2fdfd5c31d9d6ed609c2ab9ca26999\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:08Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.668366 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5m2jf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0203345d-1e9f-4cfe-bde7-90f87221d1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fdf3d79084d3e17b1f8cb0b285fd425280f34724c6a441104a2ec52b5217bfdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zzwn6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5m2jf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:08Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.686338 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"703a6b9d-a15d-4ed3-b00b-db7bd5d42c61\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cab3ab80a8fddbc591cd07fdbed800f525bb8a1b5505bf818ebb59cbcce73003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7af8be0836a8ff7d08ff48d9487acdedd4b83127538d071d13b3215400c0f8de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d57a9cede640bdcec325175598b72dd00398436b730248728c7d9afd545b1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T12:06:54Z\\\",\\\"message\\\":\\\"ed a new cert/key pair\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\"\\\\nI0129 12:06:54.190978 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 12:06:54.198044 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 12:06:54.198078 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 12:06:54.198139 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 12:06:54.198147 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 12:06:54.208089 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 12:06:54.208122 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208128 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208132 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 12:06:54.208135 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 12:06:54.208139 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 12:06:54.208143 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 12:06:54.208164 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0129 12:06:54.212380 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769688397\\\\\\\\\\\\\\\" (2026-01-29 12:06:37 +0000 UTC to 2026-02-28 12:06:38 +0000 UTC (now=2026-01-29 12:06:54.212317535 +0000 UTC))\\\\\\\"\\\\nF0129 12:06:54.212498 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e02d977e34888952623f4ced5e073d7bf1ff360f8deb435dc577d2ecf146cb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:33Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:08Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.710001 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:08Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.801036 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-12 04:04:44.938661862 +0000 UTC Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.801106 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x8kzr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d753320a-61c2-4c0e-bd48-96d74b352114\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c7a667b7243fc36ff18cb4c017152fff3d5f4eb5e28e306ace44420cd722c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tgns4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x8kzr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:08Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.805014 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.805066 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.805079 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.805099 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.805111 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:08Z","lastTransitionTime":"2026-01-29T12:07:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.820901 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vwcjk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-releas
e\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\
"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vwcjk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:08Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.837888 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0310995-a7c7-47c3-ae6c-05daaaba92a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e56ac830346c0f85227bcebc9b76c2af8646767d4678934985d7b1f5846dcb82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8391a68625fc3d8fdbc4f16b0b7b25e359798d08ce65ad8f482722910873418a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7c24x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:08Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.858182 4753 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-rnbz9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b372210b-6e1b-4a80-b379-7c1d570712f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://346c36936603f4535d5ad3e6b60e451b05b4d376e2531c813f0c900c92caf5fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqzp6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-rnbz9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:08Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.880792 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b108c546-a8c1-4bf9-814b-bfb5871b3013\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9842d9d6fed8c87bc20835964d3271b0be3117fedc658e6ef028db422d1ad478\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4fd4169036d3a646c9e5e4bfa8d86a92c557c3d8f77608ae5308dd0b8c45517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b4a6bbb431e321e54013c96e3cca1537283cbdf65d89a978780550d603663fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"k
ube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://31bd7db7bddd3e16cb455f7af9fd3a9bca49919c8f1ffb676dec83c543ba7763\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:08Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.887723 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 12:07:08 crc kubenswrapper[4753]: E0129 12:07:08.887869 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.903640 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:08Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.907938 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.907968 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.907976 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.907993 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.908002 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:08Z","lastTransitionTime":"2026-01-29T12:07:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady 
message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.946018 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:08Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:08 crc kubenswrapper[4753]: I0129 12:07:08.961115 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966eb8e59f82bacf20ebfa1b1d5bf9dc0a9cdb3b430ae83ed9bde106eb3fa521\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea7c92ec02a9cf6ec5fbd8efe22a066dfef9b48f042137964c5dd49578c99079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:08Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:09 crc kubenswrapper[4753]: I0129 12:07:09.011363 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:09 crc kubenswrapper[4753]: I0129 12:07:09.011401 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:09 crc kubenswrapper[4753]: I0129 12:07:09.011412 4753 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 29 12:07:09 crc kubenswrapper[4753]: I0129 12:07:09.011430 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:09 crc kubenswrapper[4753]: I0129 12:07:09.011441 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:09Z","lastTransitionTime":"2026-01-29T12:07:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:09 crc kubenswrapper[4753]: I0129 12:07:09.114950 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:09 crc kubenswrapper[4753]: I0129 12:07:09.115031 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:09 crc kubenswrapper[4753]: I0129 12:07:09.115095 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:09 crc kubenswrapper[4753]: I0129 12:07:09.115121 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:09 crc kubenswrapper[4753]: I0129 12:07:09.115156 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:09Z","lastTransitionTime":"2026-01-29T12:07:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:09 crc kubenswrapper[4753]: I0129 12:07:09.458925 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:09 crc kubenswrapper[4753]: I0129 12:07:09.459269 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:09 crc kubenswrapper[4753]: I0129 12:07:09.459291 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:09 crc kubenswrapper[4753]: I0129 12:07:09.459309 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:09 crc kubenswrapper[4753]: I0129 12:07:09.459318 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:09Z","lastTransitionTime":"2026-01-29T12:07:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:09 crc kubenswrapper[4753]: I0129 12:07:09.633382 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:09 crc kubenswrapper[4753]: I0129 12:07:09.633459 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:09 crc kubenswrapper[4753]: I0129 12:07:09.633476 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:09 crc kubenswrapper[4753]: I0129 12:07:09.633501 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:09 crc kubenswrapper[4753]: I0129 12:07:09.633519 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:09Z","lastTransitionTime":"2026-01-29T12:07:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:09 crc kubenswrapper[4753]: I0129 12:07:09.736335 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:09 crc kubenswrapper[4753]: I0129 12:07:09.736394 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:09 crc kubenswrapper[4753]: I0129 12:07:09.736407 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:09 crc kubenswrapper[4753]: I0129 12:07:09.736424 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:09 crc kubenswrapper[4753]: I0129 12:07:09.736438 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:09Z","lastTransitionTime":"2026-01-29T12:07:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:09 crc kubenswrapper[4753]: I0129 12:07:09.801859 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-17 14:39:27.26308092 +0000 UTC Jan 29 12:07:09 crc kubenswrapper[4753]: I0129 12:07:09.839609 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:09 crc kubenswrapper[4753]: I0129 12:07:09.839647 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:09 crc kubenswrapper[4753]: I0129 12:07:09.839658 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:09 crc kubenswrapper[4753]: I0129 12:07:09.839676 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:09 crc kubenswrapper[4753]: I0129 12:07:09.839687 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:09Z","lastTransitionTime":"2026-01-29T12:07:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:09 crc kubenswrapper[4753]: I0129 12:07:09.887669 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 12:07:09 crc kubenswrapper[4753]: I0129 12:07:09.887828 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 12:07:09 crc kubenswrapper[4753]: E0129 12:07:09.887870 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 12:07:09 crc kubenswrapper[4753]: E0129 12:07:09.888003 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 12:07:09 crc kubenswrapper[4753]: I0129 12:07:09.942061 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:09 crc kubenswrapper[4753]: I0129 12:07:09.942106 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:09 crc kubenswrapper[4753]: I0129 12:07:09.942118 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:09 crc kubenswrapper[4753]: I0129 12:07:09.942135 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:09 crc kubenswrapper[4753]: I0129 12:07:09.942147 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:09Z","lastTransitionTime":"2026-01-29T12:07:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:10 crc kubenswrapper[4753]: I0129 12:07:10.044740 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:10 crc kubenswrapper[4753]: I0129 12:07:10.044780 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:10 crc kubenswrapper[4753]: I0129 12:07:10.044791 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:10 crc kubenswrapper[4753]: I0129 12:07:10.044806 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:10 crc kubenswrapper[4753]: I0129 12:07:10.044816 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:10Z","lastTransitionTime":"2026-01-29T12:07:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:10 crc kubenswrapper[4753]: I0129 12:07:10.184760 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:10 crc kubenswrapper[4753]: I0129 12:07:10.184806 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:10 crc kubenswrapper[4753]: I0129 12:07:10.184819 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:10 crc kubenswrapper[4753]: I0129 12:07:10.184838 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:10 crc kubenswrapper[4753]: I0129 12:07:10.184849 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:10Z","lastTransitionTime":"2026-01-29T12:07:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:10 crc kubenswrapper[4753]: I0129 12:07:10.556785 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:10 crc kubenswrapper[4753]: I0129 12:07:10.556835 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:10 crc kubenswrapper[4753]: I0129 12:07:10.556844 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:10 crc kubenswrapper[4753]: I0129 12:07:10.556860 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:10 crc kubenswrapper[4753]: I0129 12:07:10.556870 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:10Z","lastTransitionTime":"2026-01-29T12:07:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:10 crc kubenswrapper[4753]: I0129 12:07:10.559352 4753 generic.go:334] "Generic (PLEG): container finished" podID="a99ab890-0a5a-4abb-86fb-a3731ff6b2c1" containerID="d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691" exitCode=0 Jan 29 12:07:10 crc kubenswrapper[4753]: I0129 12:07:10.559410 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vwcjk" event={"ID":"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1","Type":"ContainerDied","Data":"d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691"} Jan 29 12:07:10 crc kubenswrapper[4753]: I0129 12:07:10.662962 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:10 crc kubenswrapper[4753]: I0129 12:07:10.663032 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:10 crc kubenswrapper[4753]: I0129 12:07:10.663056 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:10 crc kubenswrapper[4753]: I0129 12:07:10.663084 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:10 crc kubenswrapper[4753]: I0129 12:07:10.663106 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:10Z","lastTransitionTime":"2026-01-29T12:07:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:10 crc kubenswrapper[4753]: I0129 12:07:10.803131 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-08 05:38:16.429150731 +0000 UTC Jan 29 12:07:10 crc kubenswrapper[4753]: I0129 12:07:10.853379 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:07:10 crc kubenswrapper[4753]: E0129 12:07:10.853867 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:07:26.853801814 +0000 UTC m=+61.105883329 (durationBeforeRetry 16s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:07:10 crc kubenswrapper[4753]: I0129 12:07:10.888142 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 12:07:10 crc kubenswrapper[4753]: E0129 12:07:10.888464 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 12:07:10 crc kubenswrapper[4753]: I0129 12:07:10.910798 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:10 crc kubenswrapper[4753]: I0129 12:07:10.910855 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:10 crc kubenswrapper[4753]: I0129 12:07:10.910868 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:10 crc kubenswrapper[4753]: I0129 12:07:10.910889 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:10 crc kubenswrapper[4753]: I0129 12:07:10.910902 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:10Z","lastTransitionTime":"2026-01-29T12:07:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:10 crc kubenswrapper[4753]: I0129 12:07:10.927784 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d7ccf10988fd83d31d42be6f82403067a0cc50313c4d5dcddfb528507efffa4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:10Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:10 crc kubenswrapper[4753]: I0129 12:07:10.971718 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80bec2ab-0a88-4818-9339-760edda3b07e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-nzkvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:10Z 
is after 2025-08-24T17:21:41Z" Jan 29 12:07:10 crc kubenswrapper[4753]: I0129 12:07:10.990598 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5m2jf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0203345d-1e9f-4cfe-bde7-90f87221d1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fdf3d79084d3e17b1f8cb0b285fd425280f34724c6a441104a2ec52b5217bfdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zzwn6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5m2jf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:10Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:11 crc kubenswrapper[4753]: I0129 12:07:11.008350 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7962d88e183df5c4d91f5767f2007068ea2fdfd5c31d9d6ed609c2ab9ca26999\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:11Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:11 crc kubenswrapper[4753]: I0129 12:07:11.027918 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:11Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:11 crc kubenswrapper[4753]: I0129 12:07:11.044996 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x8kzr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d753320a-61c2-4c0e-bd48-96d74b352114\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c7a667b7243fc36ff18cb4c017152fff3d5f4eb5e28e306ace44420cd722c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tgns4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x8kzr\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:11Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:11 crc kubenswrapper[4753]: I0129 12:07:11.074294 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vwcjk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin
\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}
,{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vwcjk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:11Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:11 crc kubenswrapper[4753]: I0129 12:07:11.100925 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 12:07:11 crc kubenswrapper[4753]: I0129 12:07:11.101163 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 12:07:11 crc kubenswrapper[4753]: E0129 12:07:11.322168 4753 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 29 12:07:11 crc kubenswrapper[4753]: E0129 12:07:11.322381 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. 
No retries permitted until 2026-01-29 12:07:27.322361429 +0000 UTC m=+61.574442884 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 29 12:07:11 crc kubenswrapper[4753]: E0129 12:07:11.323511 4753 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 29 12:07:11 crc kubenswrapper[4753]: E0129 12:07:11.323655 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 12:07:27.323619167 +0000 UTC m=+61.575700622 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 29 12:07:11 crc kubenswrapper[4753]: I0129 12:07:11.329850 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:11 crc kubenswrapper[4753]: I0129 12:07:11.330134 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:11 crc kubenswrapper[4753]: I0129 12:07:11.330210 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:11 crc kubenswrapper[4753]: I0129 12:07:11.330314 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:11 crc kubenswrapper[4753]: I0129 12:07:11.330419 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:11Z","lastTransitionTime":"2026-01-29T12:07:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:11 crc kubenswrapper[4753]: I0129 12:07:11.330164 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"703a6b9d-a15d-4ed3-b00b-db7bd5d42c61\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cab3ab80a8fddbc591cd07fdbed800f525bb8a1b5505bf818ebb59cbcce73003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7af8be0836a8ff7d08ff48d9487acdedd4b83127538d071d13b3215400c0f8de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d57a9cede640bdcec325175598b72dd00398436b730248728c7d9afd545b1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T12:06:54Z\\\",\\\"message\\\":\\\"ed a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\"\\\\nI0129 12:06:54.190978 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 12:06:54.198044 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 12:06:54.198078 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 12:06:54.198139 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 12:06:54.198147 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 12:06:54.208089 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 12:06:54.208122 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208128 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208132 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 12:06:54.208135 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 12:06:54.208139 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 12:06:54.208143 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 12:06:54.208164 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0129 12:06:54.212380 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769688397\\\\\\\\\\\\\\\" (2026-01-29 12:06:37 +0000 UTC to 2026-02-28 12:06:38 +0000 UTC (now=2026-01-29 12:06:54.212317535 +0000 UTC))\\\\\\\"\\\\nF0129 12:06:54.212498 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e02d977e34888952623f4ced5e073d7bf1ff360f8deb435dc577d2ecf146cb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:33Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:11Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:11 crc kubenswrapper[4753]: I0129 12:07:11.350345 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:11Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:11 crc kubenswrapper[4753]: I0129 12:07:11.371053 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:11Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:11 crc kubenswrapper[4753]: I0129 12:07:11.391511 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966eb8e59f82bacf20ebfa1b1d5bf9dc0a9cdb3b430ae83ed9bde106eb3fa521\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea7c92ec02a9cf6ec5fbd8efe22a066dfef9b48f042137964c5dd49578c99079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:11Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:11 crc kubenswrapper[4753]: I0129 12:07:11.410183 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0310995-a7c7-47c3-ae6c-05daaaba92a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e56ac830346c0f85227bcebc9b76c2af8646767d4678934985d7b1f5846dcb82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8391a68625fc3d8fdbc4f16b0b7b25e359798d08ce65ad8f482722910873418a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernete
s.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7c24x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:11Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:11 crc kubenswrapper[4753]: I0129 12:07:11.448879 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rnbz9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b372210b-6e1b-4a80-b379-7c1d570712f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://346c36936603f4535d5ad3e6b60e451b05b4d376e2531c813f0c900c92caf5fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"
},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqzp6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rnbz9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:11Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:11 crc kubenswrapper[4753]: I0129 12:07:11.473511 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b108c546-a8c1-4bf9-814b-bfb5871b3013\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9842d9d6fed8c87bc20835964d3271b0be3117fedc658e6ef028db422d1ad478\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4fd4169036d3a646c9e5e4bfa8d86a92c557c3d8f77608ae5308dd0b8c45517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":
{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b4a6bbb431e321e54013c96e3cca1537283cbdf65d89a978780550d603663fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://31bd7db7bddd3e16cb455f7af9fd3a9bca49919c8f1ffb676dec83c543ba7763\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:11Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:11 crc kubenswrapper[4753]: I0129 12:07:11.498456 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:11 crc kubenswrapper[4753]: I0129 12:07:11.498519 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:11 crc kubenswrapper[4753]: I0129 12:07:11.498536 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:11 crc kubenswrapper[4753]: I0129 12:07:11.498565 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:11 crc kubenswrapper[4753]: I0129 12:07:11.498579 
4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:11Z","lastTransitionTime":"2026-01-29T12:07:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:11 crc kubenswrapper[4753]: I0129 12:07:11.583593 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" event={"ID":"80bec2ab-0a88-4818-9339-760edda3b07e","Type":"ContainerStarted","Data":"8e40e5b13dfdc135ea09253d466bacdfc5c2ac3d28eabc76d833d71ac300905c"} Jan 29 12:07:12 crc kubenswrapper[4753]: I0129 12:07:11.588841 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vwcjk" event={"ID":"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1","Type":"ContainerStarted","Data":"b8b1c7640bd4881ba79f3471e309b2d1538aea8d0c8e01d3d7cfc3677d95725a"} Jan 29 12:07:12 crc kubenswrapper[4753]: I0129 12:07:11.613020 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:12 crc kubenswrapper[4753]: I0129 12:07:11.613083 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:12 crc kubenswrapper[4753]: I0129 12:07:11.613098 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:12 crc kubenswrapper[4753]: I0129 12:07:11.613127 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:12 crc kubenswrapper[4753]: I0129 12:07:11.613140 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:11Z","lastTransitionTime":"2026-01-29T12:07:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:12 crc kubenswrapper[4753]: I0129 12:07:12.016524 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-28 11:14:49.21365735 +0000 UTC Jan 29 12:07:12 crc kubenswrapper[4753]: I0129 12:07:12.018933 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:12 crc kubenswrapper[4753]: I0129 12:07:12.018965 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:12 crc kubenswrapper[4753]: I0129 12:07:12.018976 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:12 crc kubenswrapper[4753]: I0129 12:07:12.019000 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:12 crc kubenswrapper[4753]: I0129 12:07:12.019012 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:12Z","lastTransitionTime":"2026-01-29T12:07:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:12 crc kubenswrapper[4753]: I0129 12:07:12.021615 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 12:07:12 crc kubenswrapper[4753]: I0129 12:07:12.021667 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 12:07:12 crc kubenswrapper[4753]: E0129 12:07:12.021847 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 12:07:12 crc kubenswrapper[4753]: E0129 12:07:12.022506 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 12:07:12 crc kubenswrapper[4753]: I0129 12:07:12.024191 4753 scope.go:117] "RemoveContainer" containerID="34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a" Jan 29 12:07:12 crc kubenswrapper[4753]: I0129 12:07:12.034737 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d7ccf10988fd83d31d42be6f82403067a0cc50313c4d5dcddfb528507efffa4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:12Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:12 crc kubenswrapper[4753]: E0129 12:07:12.046782 4753 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\
"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":45063
7738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7536a030-b66d-4df9-a1ec-9890f7ad99e7\\\",\\\"systemUUID\\\":\\\"ce89c1ce-259c-4b78-b348-3ef96afb6944\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:12Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:12 crc kubenswrapper[4753]: I0129 12:07:12.052997 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:12 crc kubenswrapper[4753]: I0129 12:07:12.053062 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:12 crc kubenswrapper[4753]: I0129 12:07:12.053076 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:12 crc kubenswrapper[4753]: I0129 12:07:12.053100 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:12 crc kubenswrapper[4753]: I0129 12:07:12.053136 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:12Z","lastTransitionTime":"2026-01-29T12:07:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:12 crc kubenswrapper[4753]: I0129 12:07:12.067418 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80bec2ab-0a88-4818-9339-760edda3b07e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7e
f54fa391352d7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-nzkvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:12Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:12 crc kubenswrapper[4753]: E0129 12:07:12.068865 4753 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7536a030-b66d-4df9-a1ec-9890f7ad99e7\\\",\\\"systemUUID\\\":\\\"ce89c1ce-259c-4b78-b348-3ef96afb6944\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:12Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:12 crc kubenswrapper[4753]: I0129 12:07:12.082963 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:12 crc kubenswrapper[4753]: I0129 12:07:12.083015 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 12:07:12 crc kubenswrapper[4753]: I0129 12:07:12.083028 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:12 crc kubenswrapper[4753]: I0129 12:07:12.083100 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:12 crc kubenswrapper[4753]: I0129 12:07:12.083113 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:12Z","lastTransitionTime":"2026-01-29T12:07:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:12 crc kubenswrapper[4753]: I0129 12:07:12.551583 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 12:07:12 crc kubenswrapper[4753]: I0129 12:07:12.551701 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 12:07:12 crc kubenswrapper[4753]: E0129 12:07:12.551943 4753 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 29 12:07:12 crc kubenswrapper[4753]: E0129 12:07:12.551969 4753 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 29 12:07:12 crc kubenswrapper[4753]: E0129 12:07:12.552004 4753 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 12:07:12 crc kubenswrapper[4753]: E0129 12:07:12.552093 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-29 12:07:28.552066376 +0000 UTC m=+62.804147831 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 12:07:12 crc kubenswrapper[4753]: E0129 12:07:12.552266 4753 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 29 12:07:12 crc kubenswrapper[4753]: E0129 12:07:12.552292 4753 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 29 12:07:12 crc kubenswrapper[4753]: E0129 12:07:12.552314 4753 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 12:07:12 crc kubenswrapper[4753]: E0129 12:07:12.552362 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-29 12:07:28.552347585 +0000 UTC m=+62.804429040 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 12:07:12 crc kubenswrapper[4753]: I0129 12:07:12.581710 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7962d88e183df5c4d91f5767f2007068ea2fdfd5c31d9d6ed609c2ab9ca26999\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:12Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:12 crc kubenswrapper[4753]: E0129 12:07:12.661413 4753 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056
b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951
},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7536a030-b66d-4df9-a1ec-9890f7ad99e7\\\",\\\"systemUUID\\\":\\\"ce89c1ce-259c-4b78-b348-3ef96afb6944\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2026-01-29T12:07:12Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:12 crc kubenswrapper[4753]: I0129 12:07:12.671614 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:12 crc kubenswrapper[4753]: I0129 12:07:12.671693 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:12 crc kubenswrapper[4753]: I0129 12:07:12.671719 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:12 crc kubenswrapper[4753]: I0129 12:07:12.671742 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:12 crc kubenswrapper[4753]: I0129 12:07:12.671755 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:12Z","lastTransitionTime":"2026-01-29T12:07:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:12 crc kubenswrapper[4753]: I0129 12:07:12.674714 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5m2jf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0203345d-1e9f-4cfe-bde7-90f87221d1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fdf3d79084d3e17b1f8cb0b285fd425280f34724c6a441104a2ec52b5217bfdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zzwn6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12
:06:58Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5m2jf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:12Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:12 crc kubenswrapper[4753]: E0129 12:07:12.689289 4753 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7536a030-b66d-4df9-a1ec-9890f7ad99e7\\\",\\\"systemUUID\\\":\\\"ce89c1ce-259c-4b78-b348-3ef96afb6944\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:12Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:12 crc kubenswrapper[4753]: I0129 12:07:12.698058 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:12 crc kubenswrapper[4753]: I0129 12:07:12.698189 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 12:07:12 crc kubenswrapper[4753]: I0129 12:07:12.698204 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:12 crc kubenswrapper[4753]: I0129 12:07:12.698253 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:12 crc kubenswrapper[4753]: I0129 12:07:12.698272 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:12Z","lastTransitionTime":"2026-01-29T12:07:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:12 crc kubenswrapper[4753]: I0129 12:07:12.707577 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"703a6b9d-a15d-4ed3-b00b-db7bd5d42c61\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cab3ab80a8fddbc591cd07fdbed800f525bb8a1b5505bf818ebb59cbcce73003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7af8be0836a8ff7d08ff48d9487acdedd4b83127538d071d13b3215400c0f8de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d57a9cede640bdcec325175598b72dd00398436b730248728c7d9afd545b1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T12:06:54Z\\\",\\\"message\\\":\\\"ed a new cert/key pair\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\"\\\\nI0129 12:06:54.190978 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 12:06:54.198044 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 12:06:54.198078 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 12:06:54.198139 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 12:06:54.198147 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 12:06:54.208089 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 12:06:54.208122 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208128 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208132 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 12:06:54.208135 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 12:06:54.208139 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 12:06:54.208143 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 12:06:54.208164 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0129 12:06:54.212380 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769688397\\\\\\\\\\\\\\\" (2026-01-29 12:06:37 +0000 UTC to 2026-02-28 12:06:38 +0000 UTC (now=2026-01-29 12:06:54.212317535 +0000 UTC))\\\\\\\"\\\\nF0129 12:06:54.212498 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e02d977e34888952623f4ced5e073d7bf1ff360f8deb435dc577d2ecf146cb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:33Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:12Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:12 crc kubenswrapper[4753]: E0129 12:07:12.714744 4753 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\
"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":45063
7738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7536a030-b66d-4df9-a1ec-9890f7ad99e7\\\",\\\"systemUUID\\\":\\\"ce89c1ce-259c-4b78-b348-3ef96afb6944\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:12Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:12 crc kubenswrapper[4753]: E0129 12:07:12.714951 4753 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 29 12:07:12 crc kubenswrapper[4753]: I0129 12:07:12.717867 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:12 crc kubenswrapper[4753]: I0129 12:07:12.717910 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:12 crc kubenswrapper[4753]: I0129 12:07:12.717921 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:12 crc kubenswrapper[4753]: I0129 12:07:12.718023 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:12 crc kubenswrapper[4753]: I0129 12:07:12.718037 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:12Z","lastTransitionTime":"2026-01-29T12:07:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:12 crc kubenswrapper[4753]: I0129 12:07:12.726532 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:12Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:12 crc kubenswrapper[4753]: I0129 12:07:12.742941 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x8kzr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d753320a-61c2-4c0e-bd48-96d74b352114\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c7a667b7243fc36ff18cb4c017152fff3d5f4eb5e28e306ace44420cd722c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tgns4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x8kzr\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:12Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:12 crc kubenswrapper[4753]: I0129 12:07:12.763149 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vwcjk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin
\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8b1c7640bd4881ba79f3471e309b2d1538aea8d0c8e01d3d7cfc3677d95725a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\
\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vwcjk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:12Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:12 crc kubenswrapper[4753]: I0129 12:07:12.862319 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:12 crc kubenswrapper[4753]: I0129 12:07:12.862380 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:12 crc kubenswrapper[4753]: I0129 12:07:12.862393 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:12 crc kubenswrapper[4753]: I0129 12:07:12.862416 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:12 crc kubenswrapper[4753]: I0129 12:07:12.862427 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:12Z","lastTransitionTime":"2026-01-29T12:07:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:12 crc kubenswrapper[4753]: I0129 12:07:12.879547 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0310995-a7c7-47c3-ae6c-05daaaba92a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e56ac830346c0f85227bcebc9b76c2af8646767d4678934985d7b1f5846dcb82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8391a68625fc3d8fdbc4f16b0b7b25e359798d08ce65ad8f482722910873418a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7c24x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:12Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:12 crc kubenswrapper[4753]: I0129 12:07:12.888558 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 12:07:12 crc kubenswrapper[4753]: E0129 12:07:12.888789 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 12:07:12 crc kubenswrapper[4753]: I0129 12:07:12.910447 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rnbz9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b372210b-6e1b-4a80-b379-7c1d570712f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://346c36936603f4535d5ad3e6b60e451b05b4d376e2531c813f0c900c92caf5fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":
\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqzp6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rnbz9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:12Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:13 crc kubenswrapper[4753]: I0129 12:07:13.085969 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-23 17:06:08.719028837 +0000 UTC Jan 29 12:07:13 crc kubenswrapper[4753]: I0129 12:07:13.089675 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:13 crc kubenswrapper[4753]: I0129 12:07:13.090017 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:13 crc kubenswrapper[4753]: I0129 12:07:13.090121 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:13 crc kubenswrapper[4753]: I0129 12:07:13.090210 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:13 crc kubenswrapper[4753]: I0129 12:07:13.090351 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:13Z","lastTransitionTime":"2026-01-29T12:07:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:13 crc kubenswrapper[4753]: I0129 12:07:13.092053 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b108c546-a8c1-4bf9-814b-bfb5871b3013\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9842d9d6fed8c87bc20835964d3271b0be3117fedc658e6ef028db422d1ad478\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4fd4169036d3a646c9e5e4bfa8d86a92c557c3d8f77608ae5308dd0b8c45517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b4a6bbb431e321e54013c96e3cca1537283cbdf65d89a978780550d603663fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://31bd7db7bddd3e16cb455f7af9fd3a9bca49919c8f1ffb676dec83c543ba7763\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:13Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:13 crc kubenswrapper[4753]: I0129 12:07:13.114371 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:13Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:13 crc kubenswrapper[4753]: I0129 12:07:13.135392 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:13Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:13 crc kubenswrapper[4753]: I0129 12:07:13.194774 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:13 crc kubenswrapper[4753]: I0129 12:07:13.195187 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:13 crc kubenswrapper[4753]: I0129 12:07:13.195307 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:13 crc kubenswrapper[4753]: I0129 12:07:13.195408 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:13 crc kubenswrapper[4753]: I0129 12:07:13.195485 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:13Z","lastTransitionTime":"2026-01-29T12:07:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:13 crc kubenswrapper[4753]: I0129 12:07:13.219489 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966eb8e59f82bacf20ebfa1b1d5bf9dc0a9cdb3b430ae83ed9bde106eb3fa521\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea7c92ec02a9cf6ec5fbd8efe22a066dfef9b48f042137964c5dd49578c99079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:13Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:13 crc kubenswrapper[4753]: I0129 12:07:13.299696 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:13 crc kubenswrapper[4753]: I0129 
12:07:13.299747 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:13 crc kubenswrapper[4753]: I0129 12:07:13.299759 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:13 crc kubenswrapper[4753]: I0129 12:07:13.299781 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:13 crc kubenswrapper[4753]: I0129 12:07:13.299793 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:13Z","lastTransitionTime":"2026-01-29T12:07:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:13 crc kubenswrapper[4753]: I0129 12:07:13.404091 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:13 crc kubenswrapper[4753]: I0129 12:07:13.404150 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:13 crc kubenswrapper[4753]: I0129 12:07:13.404164 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:13 crc kubenswrapper[4753]: I0129 12:07:13.404183 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:13 crc kubenswrapper[4753]: I0129 12:07:13.404192 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:13Z","lastTransitionTime":"2026-01-29T12:07:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:13 crc kubenswrapper[4753]: I0129 12:07:13.507184 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:13 crc kubenswrapper[4753]: I0129 12:07:13.507261 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:13 crc kubenswrapper[4753]: I0129 12:07:13.507276 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:13 crc kubenswrapper[4753]: I0129 12:07:13.507296 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:13 crc kubenswrapper[4753]: I0129 12:07:13.507312 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:13Z","lastTransitionTime":"2026-01-29T12:07:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:13 crc kubenswrapper[4753]: I0129 12:07:13.602180 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Jan 29 12:07:13 crc kubenswrapper[4753]: I0129 12:07:13.604837 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"e86597c91848f265cd70410d57ac60eadba1d0322b9b897097366a41c93d8134"} Jan 29 12:07:13 crc kubenswrapper[4753]: I0129 12:07:13.608602 4753 generic.go:334] "Generic (PLEG): container finished" podID="a99ab890-0a5a-4abb-86fb-a3731ff6b2c1" containerID="b8b1c7640bd4881ba79f3471e309b2d1538aea8d0c8e01d3d7cfc3677d95725a" exitCode=0 Jan 29 12:07:13 crc kubenswrapper[4753]: I0129 12:07:13.608708 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vwcjk" event={"ID":"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1","Type":"ContainerDied","Data":"b8b1c7640bd4881ba79f3471e309b2d1538aea8d0c8e01d3d7cfc3677d95725a"} Jan 29 12:07:13 crc kubenswrapper[4753]: I0129 12:07:13.609050 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:13 crc kubenswrapper[4753]: I0129 12:07:13.609110 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:13 crc kubenswrapper[4753]: I0129 12:07:13.609126 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:13 crc kubenswrapper[4753]: I0129 12:07:13.609153 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:13 crc kubenswrapper[4753]: I0129 12:07:13.609169 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:13Z","lastTransitionTime":"2026-01-29T12:07:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:13 crc kubenswrapper[4753]: I0129 12:07:13.632897 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0310995-a7c7-47c3-ae6c-05daaaba92a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e56ac830346c0f85227bcebc9b76c2af8646767d4678934985d7b1f5846dcb82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8391a68625fc3d8fdbc4f16b0b7b25e359798d08ce65ad8f482722910873418a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7c24x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:13Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:13 crc kubenswrapper[4753]: I0129 12:07:13.655865 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rnbz9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b372210b-6e1b-4a80-b379-7c1d570712f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://346c36936603f4535d5ad3e6b60e451b05b4d376e2531c813f0c900c92caf5fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqzp6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\
\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rnbz9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:13Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:13 crc kubenswrapper[4753]: I0129 12:07:13.723263 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:13 crc kubenswrapper[4753]: I0129 12:07:13.723329 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:13 crc kubenswrapper[4753]: I0129 12:07:13.723343 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:13 crc kubenswrapper[4753]: I0129 12:07:13.723363 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:13 crc kubenswrapper[4753]: I0129 12:07:13.723376 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:13Z","lastTransitionTime":"2026-01-29T12:07:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:13 crc kubenswrapper[4753]: I0129 12:07:13.958207 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b108c546-a8c1-4bf9-814b-bfb5871b3013\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9842d9d6fed8c87bc20835964d3271b0be3117fedc658e6ef028db422d1ad478\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4fd4169036d3a646c9e5e4bfa8d86a92c557c3d8f77608ae5308dd0b8c45517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b4a6bbb431e321e54013c96e3cca1537283cbdf65d89a978780550d603663fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://31bd7db7bddd3e16cb455f7af9fd3a9bca49919c8f1ffb676dec83c543ba7763\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:13Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:13 crc kubenswrapper[4753]: I0129 12:07:13.994494 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 12:07:13 crc kubenswrapper[4753]: I0129 12:07:13.994561 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 12:07:13 crc kubenswrapper[4753]: E0129 12:07:13.994657 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 12:07:13 crc kubenswrapper[4753]: E0129 12:07:13.995142 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.000098 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.000153 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.000164 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.000186 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.000204 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:14Z","lastTransitionTime":"2026-01-29T12:07:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.005476 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:14Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.034863 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:14Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.061855 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966eb8e59f82bacf20ebfa1b1d5bf9dc0a9cdb3b430ae83ed9bde106eb3fa521\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea7c92ec02a9cf6ec5fbd8efe22a066dfef9b48f042137964c5dd49578c99079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:14Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.117809 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-20 04:11:50.937613045 +0000 UTC Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.121684 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.121725 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.121734 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.121755 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.121768 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:14Z","lastTransitionTime":"2026-01-29T12:07:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.122197 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d7ccf10988fd83d31d42be6f82403067a0cc50313c4d5dcddfb528507efffa4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:14Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.150309 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80bec2ab-0a88-4818-9339-760edda3b07e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:00
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-nzkvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:14Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.170114 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7962d88e183df5c4d91f5767f2007068ea2fdfd5c31d9d6ed609c2ab9ca26999\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:14Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.189212 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5m2jf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0203345d-1e9f-4cfe-bde7-90f87221d1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fdf3d79084d3e17b1f8cb0b285fd425280f34724c6a441104a2ec52b5217bfdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zzwn6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5m2jf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:14Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.212219 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"703a6b9d-a15d-4ed3-b00b-db7bd5d42c61\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cab3ab80a8fddbc591cd07fdbed800f525bb8a1b5505bf818ebb59cbcce73003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7af8be0836a8ff7d08ff48d9487acdedd4b83127538d071d13b3215400c0f8de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d57a9cede640bdcec325175598b72dd00398436b730248728c7d9afd545b1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e86597c91848f265cd70410d57ac60eadba1d0322b9b897097366a41c93d8134\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T12:06:54Z\\\",\\\"message\\\":\\\"ed a new cert/key pair\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\"\\\\nI0129 12:06:54.190978 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 12:06:54.198044 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 12:06:54.198078 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 12:06:54.198139 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 12:06:54.198147 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 12:06:54.208089 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 12:06:54.208122 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208128 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208132 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 12:06:54.208135 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 12:06:54.208139 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 12:06:54.208143 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 12:06:54.208164 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0129 12:06:54.212380 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769688397\\\\\\\\\\\\\\\" (2026-01-29 12:06:37 +0000 UTC to 2026-02-28 12:06:38 +0000 UTC (now=2026-01-29 12:06:54.212317535 +0000 UTC))\\\\\\\"\\\\nF0129 12:06:54.212498 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e02d977e34888952623f4ced5e073d7bf1ff360f8deb435dc577d2ecf146cb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:33Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:14Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.225389 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.225435 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.225447 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.225465 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.225475 4753 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:14Z","lastTransitionTime":"2026-01-29T12:07:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.233148 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:14Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.247969 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x8kzr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d753320a-61c2-4c0e-bd48-96d74b352114\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c7a667b7243fc36ff18cb4c017152fff3d5f4eb5e28e306ace44420cd722c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tgns4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x8kzr\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:14Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.272499 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vwcjk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin
\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8b1c7640bd4881ba79f3471e309b2d1538aea8d0c8e01d3d7cfc3677d95725a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\
\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vwcjk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:14Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.291485 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d7ccf10988fd83d31d42be6f82403067a0cc50313c4d5dcddfb528507efffa4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:14Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.320897 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80bec2ab-0a88-4818-9339-760edda3b07e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-nzkvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:14Z 
Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.329058 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.329165 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.329178 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.329205 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.329219 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:14Z","lastTransitionTime":"2026-01-29T12:07:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.340575 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5m2jf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0203345d-1e9f-4cfe-bde7-90f87221d1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fdf3d79084d3e17b1f8cb0b285fd425280f34724c6a441104a2ec52b5217bfdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zzwn6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5m2jf\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:14Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.361512 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7962d88e183df5c4d91f5767f2007068ea2fdfd5c31d9d6ed609c2ab9ca26999\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:14Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.381304 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:14Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.407109 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x8kzr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d753320a-61c2-4c0e-bd48-96d74b352114\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c7a667b7243fc36ff18cb4c017152fff3d5f4eb5e28e306ace44420cd722c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tgns4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x8kzr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:14Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.432496 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vwcjk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:05Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8b1c7640bd4881ba79f3471e309b2d1538aea8d0c8e01d3d7cfc3677d95725a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8b1c7640bd4881ba79f3471e309b2d1538aea8d0c8e01d3d7cfc3677d95725a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serv
iceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vwcjk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:14Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.432900 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.433087 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.433099 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.433122 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.433138 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:14Z","lastTransitionTime":"2026-01-29T12:07:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.549001 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.549052 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.549069 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.549090 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.549104 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:14Z","lastTransitionTime":"2026-01-29T12:07:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.555046 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"703a6b9d-a15d-4ed3-b00b-db7bd5d42c61\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cab3ab80a8fddbc591cd07fdbed800f525bb8a1b5505bf818ebb59cbcce73003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7af8be0836a8ff7d08ff48d9487acdedd4b83127538d071d13b3215400c0f8de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d57a9cede640bdcec325175598b72dd00398436b730248728c7d9afd545b1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e86597c91848f265cd70410d57ac60eadba1d0322b9b897097366a41c93d8134\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T12:06:54Z\\\",\\\"message\\\":\\\"ed a new cert/key pair\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\"\\\\nI0129 12:06:54.190978 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 12:06:54.198044 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 12:06:54.198078 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 12:06:54.198139 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 12:06:54.198147 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 12:06:54.208089 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 12:06:54.208122 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208128 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208132 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 12:06:54.208135 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 12:06:54.208139 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 12:06:54.208143 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 12:06:54.208164 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0129 12:06:54.212380 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769688397\\\\\\\\\\\\\\\" (2026-01-29 12:06:37 +0000 UTC to 2026-02-28 12:06:38 +0000 UTC (now=2026-01-29 12:06:54.212317535 +0000 UTC))\\\\\\\"\\\\nF0129 12:06:54.212498 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e02d977e34888952623f4ced5e073d7bf1ff360f8deb435dc577d2ecf146cb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:33Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:14Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.575161 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:14Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.596099 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:14Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.617334 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966eb8e59f82bacf20ebfa1b1d5bf9dc0a9cdb3b430ae83ed9bde106eb3fa521\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea7c92ec02a9cf6ec5fbd8efe22a066dfef9b48f042137964c5dd49578c99079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:14Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.617521 4753 generic.go:334] "Generic (PLEG): container finished" podID="a99ab890-0a5a-4abb-86fb-a3731ff6b2c1" containerID="d0ba51b6e2180da460faa3599850175db9d7410880ba0214425fa275803f36d3" exitCode=0 Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.617607 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vwcjk" event={"ID":"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1","Type":"ContainerDied","Data":"d0ba51b6e2180da460faa3599850175db9d7410880ba0214425fa275803f36d3"} Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.629515 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" event={"ID":"80bec2ab-0a88-4818-9339-760edda3b07e","Type":"ContainerStarted","Data":"34d19c3c88e356688bc3f0dbe54c1a46ef5e5f5fedbf84c5cb536b3c272decb0"} Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.630991 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.631108 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.631181 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.635556 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0310995-a7c7-47c3-ae6c-05daaaba92a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e56ac830346c0f85227bcebc9b76c2af8646767d4678934985d7b1f5846dcb82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8391a68625fc3d8fdbc4f16b0b7b25e359798d08ce65ad8f482722910873418a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7c24x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:14Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.654431 4753 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.654486 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.654500 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.654527 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.654542 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:14Z","lastTransitionTime":"2026-01-29T12:07:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.685203 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rnbz9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b372210b-6e1b-4a80-b379-7c1d570712f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://346c36936603f4535d5ad3e6b60e451b05b4d376e2531c813f0c900c92caf5fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin
\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqzp6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rnbz9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:14Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.962389 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 12:07:14 crc kubenswrapper[4753]: E0129 12:07:14.963713 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.973997 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.974081 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.974110 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.974285 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.974558 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:14 crc kubenswrapper[4753]: I0129 12:07:14.974663 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:14Z","lastTransitionTime":"2026-01-29T12:07:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.118160 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-19 08:49:10.395688834 +0000 UTC Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.172073 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b108c546-a8c1-4bf9-814b-bfb5871b3013\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9842d9d6fed8c87bc20835964d3271b0be3117fedc658e6ef028db422d1ad478\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4fd4169036d3a646c9e5e4bfa8d86a92c557c3d8f77608ae5308dd0b8c45517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b4a6bbb431e321e54013c96e3cca1537283cbdf65d89a978780550d603663fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCoun
t\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://31bd7db7bddd3e16cb455f7af9fd3a9bca49919c8f1ffb676dec83c543ba7763\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:14Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.175444 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.175520 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.175570 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.175591 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.175602 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:15Z","lastTransitionTime":"2026-01-29T12:07:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.177599 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.197579 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d7ccf10988fd83d31d42be6f82403067a0cc50313c4d5dcddfb528507efffa4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:15Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.225904 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80bec2ab-0a88-4818-9339-760edda3b07e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f15016e29f1fc14f51b4d01e772071333fd8a885cacc1804b3a03dbcfc93836\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e6a7d8df812546f17b3e026005c5062086e97e3fa364b3fa22cefec025e4eba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f71859413676dab4a435d43c11ad7cba315a12de744c3c8b161273f6ad36580c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\
"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0ec2a21ec299b5eed363668f39a270a8b03ea79b9de3dd23bda0c30579c3de3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91c1df8b6763320498420463a3841f0bfa4fcf753c355f549c3c1e93bf5206a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://05d1e99596530f467ecdae51aec8db191ac045cd85e121fc9835e2f3688f6ae7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-sock
et\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34d19c3c88e356688bc3f0dbe54c1a46ef5e5f5fedbf84c5cb536b3c272decb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e40e5b13dfdc135ea09253d466bacdfc5c2ac3d28eabc76d833d71ac300905c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mou
ntPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-nzkvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:15Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.246827 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7962d88e183df5c4d91f5767f2007068ea2fdfd5c31d9d6ed609c2ab9ca26999\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:15Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.265881 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5m2jf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0203345d-1e9f-4cfe-bde7-90f87221d1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fdf3d79084d3e17b1f8cb0b285fd425280f34724c6a441104a2ec52b5217bfdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zzwn6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5m2jf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:15Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.278789 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.278828 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.278837 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.278856 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.278867 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:15Z","lastTransitionTime":"2026-01-29T12:07:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.287666 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"703a6b9d-a15d-4ed3-b00b-db7bd5d42c61\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cab3ab80a8fddbc591cd07fdbed800f525bb8a1b5505bf818ebb59cbcce73003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7af8be0836a8ff7d08ff48d9487acdedd4b83127538d071d13b3215400c0f8de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d57a9cede640bdcec325175598b72dd00398436b730248728c7d9afd545b1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-
apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e86597c91848f265cd70410d57ac60eadba1d0322b9b897097366a41c93d8134\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T12:06:54Z\\\",\\\"message\\\":\\\"ed a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\"\\\\nI0129 12:06:54.190978 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 12:06:54.198044 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 12:06:54.198078 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 12:06:54.198139 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 12:06:54.198147 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 12:06:54.208089 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 12:06:54.208122 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208128 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208132 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 12:06:54.208135 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 12:06:54.208139 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 12:06:54.208143 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 12:06:54.208164 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0129 12:06:54.212380 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769688397\\\\\\\\\\\\\\\" (2026-01-29 12:06:37 +0000 UTC to 2026-02-28 12:06:38 +0000 UTC (now=2026-01-29 12:06:54.212317535 +0000 UTC))\\\\\\\"\\\\nF0129 12:06:54.212498 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e02d977e34888952623f4ced5e073d7bf1ff360f8deb435dc577d2ecf146cb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:33Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:15Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.307087 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:15Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.323820 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x8kzr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d753320a-61c2-4c0e-bd48-96d74b352114\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c7a667b7243fc36ff18cb4c017152fff3d5f4eb5e28e306ace44420cd722c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tgns4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x8kzr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:15Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.347436 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vwcjk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:05Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8b1c7640bd4881ba79f3471e309b2d1538aea8d0c8e01d3d7cfc3677d95725a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8b1c7640bd4881ba79f3471e309b2d1538aea8d0c8e01d3d7cfc3677d95725a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0ba51b6e2180da460faa3599850175db9d7410880ba0214425fa275803f36d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0ba51b6e2180da460faa3599850175db9d7410880ba0214425fa275803f36d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"w
aiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vwcjk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:15Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.366217 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0310995-a7c7-47c3-ae6c-05daaaba92a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e56ac830346c0f85227bcebc9b76c2af8646767d4678934985d7b1f5846dcb82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8391a68625fc3d8fdbc4f16b0b7b25e359798d08ce65ad8f482722910873418a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7c24x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:15Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.383276 4753 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.383320 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.383330 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.383351 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.383361 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:15Z","lastTransitionTime":"2026-01-29T12:07:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.489197 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.489276 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.489296 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.489462 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.489483 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:15Z","lastTransitionTime":"2026-01-29T12:07:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.783807 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.783843 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.783855 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.783884 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.783895 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:15Z","lastTransitionTime":"2026-01-29T12:07:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.794228 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rnbz9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b372210b-6e1b-4a80-b379-7c1d570712f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://346c36936603f4535d5ad3e6b60e451b05b4d376e2531c813f0c900c92caf5fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqzp6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rnbz9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:15Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.815052 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b108c546-a8c1-4bf9-814b-bfb5871b3013\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9842d9d6fed8c87bc20835964d3271b0be3117fedc658e6ef028db422d1ad478\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4fd4169036d3a646c9e5e4bfa8d86a92c557c3d8f77608ae5308dd0b8c45517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b4a6bbb431e321e54013c96e3cca1537283cbdf65d89a978780550d603663fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-oper
ator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://31bd7db7bddd3e16cb455f7af9fd3a9bca49919c8f1ffb676dec83c543ba7763\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:15Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.834019 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:15Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.854260 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:15Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.876891 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966eb8e59f82bacf20ebfa1b1d5bf9dc0a9cdb3b430ae83ed9bde106eb3fa521\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea7c92ec02a9cf6ec5fbd8efe22a066dfef9b48f042137964c5dd49578c99079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:15Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.887751 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.887790 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.887818 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.887830 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.887853 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.887870 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:15Z","lastTransitionTime":"2026-01-29T12:07:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.887894 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 12:07:15 crc kubenswrapper[4753]: E0129 12:07:15.887955 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 12:07:15 crc kubenswrapper[4753]: E0129 12:07:15.888126 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.897533 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d7ccf10988fd83d31d42be6f82403067a0cc50313c4d5dcddfb528507efffa4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:15Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.920398 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80bec2ab-0a88-4818-9339-760edda3b07e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f15016e29f1fc14f51b4d01e772071333fd8a885cacc1804b3a03dbcfc93836\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e6a7d8df812546f17b3e026005c5062086e97e3fa364b3fa22cefec025e4eba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f71859413676dab4a435d43c11ad7cba315a12de744c3c8b161273f6ad36580c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0ec2a21ec299b5eed363668f39a270a8b03ea79b9de3dd23bda0c30579c3de3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91c1df8b6763320498420463a3841f0bfa4fcf753c355f549c3c1e93bf5206a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://05d1e99596530f467ecdae51aec8db191ac045cd85e121fc9835e2f3688f6ae7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34d19c3c88e356688bc3f0dbe54c1a46ef5e5f5fedbf84c5cb536b3c272decb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e40e5b13dfdc135ea09253d466bacdfc5c2ac3d28eabc76d833d71ac300905c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPat
h\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-nzkvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:15Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.950475 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7962d88e183df5c4d91f5767f2007068ea2fdfd5c31d9d6ed609c2ab9ca26999\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:15Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.968759 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5m2jf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0203345d-1e9f-4cfe-bde7-90f87221d1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fdf3d79084d3e17b1f8cb0b285fd425280f34724c6a441104a2ec52b5217bfdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zzwn6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5m2jf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:15Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.987144 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x8kzr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d753320a-61c2-4c0e-bd48-96d74b352114\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c7a667b7243fc36ff18cb4c017152fff3d5f4eb5e28e306ace44420cd722c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tgns4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x8kzr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:15Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.991381 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.991420 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.991432 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.991452 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:15 crc kubenswrapper[4753]: I0129 12:07:15.991835 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:15Z","lastTransitionTime":"2026-01-29T12:07:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:16 crc kubenswrapper[4753]: I0129 12:07:16.015558 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vwcjk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"read
Only\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8b1c7640bd4881ba79f3471e309b2d1538aea8d0c8e01d3d7cfc3677d95725a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8b1c7640bd4881ba79f3471e309b2d1538aea8d0c8e01d3d7cfc3677d95725a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0ba51b6e2180da460faa3599850175db9d7410880ba0214425fa275803f36d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerI
D\\\":\\\"cri-o://d0ba51b6e2180da460faa3599850175db9d7410880ba0214425fa275803f36d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vwcjk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:16Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:16 crc kubenswrapper[4753]: I0129 12:07:16.036466 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"703a6b9d-a15d-4ed3-b00b-db7bd5d42c61\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cab3ab80a8fddbc591cd07fdbed800f525bb8a1b5505bf818ebb59cbcce73003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7af8be0836a8ff7d08ff48d9487acdedd4b83127538d071d13b3215400c0f8de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d57a9cede640bdcec325175598b72dd00398436b730248728c7d9afd545b1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e86597c91848f265cd70410d57ac60eadba1d0322b9b897097366a41c93d8134\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T12:06:54Z\\\",\\\"message\\\":\\\"ed a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\"\\\\nI0129 12:06:54.190978 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 12:06:54.198044 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 12:06:54.198078 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 12:06:54.198139 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 12:06:54.198147 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 12:06:54.208089 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 12:06:54.208122 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208128 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208132 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 12:06:54.208135 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 12:06:54.208139 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 12:06:54.208143 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 12:06:54.208164 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0129 12:06:54.212380 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769688397\\\\\\\\\\\\\\\" (2026-01-29 12:06:37 +0000 UTC to 2026-02-28 12:06:38 +0000 UTC (now=2026-01-29 12:06:54.212317535 +0000 UTC))\\\\\\\"\\\\nF0129 12:06:54.212498 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e02d977e34888952623f4ced5e073d7bf1ff360f8deb435dc577d2ecf146cb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:33Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:16Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:16 crc kubenswrapper[4753]: I0129 12:07:16.060931 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:16Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:16 crc kubenswrapper[4753]: I0129 12:07:16.079668 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:16Z is after 2025-08-24T17:21:41Z"
Jan 29 12:07:16 crc kubenswrapper[4753]: I0129 12:07:16.099195 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:16 crc kubenswrapper[4753]: I0129 12:07:16.099425 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:16 crc kubenswrapper[4753]: I0129 12:07:16.099532 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:16 crc kubenswrapper[4753]: I0129 12:07:16.099654 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:16 crc kubenswrapper[4753]: I0129 12:07:16.099726 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:16Z","lastTransitionTime":"2026-01-29T12:07:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:16 crc kubenswrapper[4753]: I0129 12:07:16.105104 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966eb8e59f82bacf20ebfa1b1d5bf9dc0a9cdb3b430ae83ed9bde106eb3fa521\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea7c92ec02a9cf6ec5fbd8efe22a066dfef9b48f042137964c5dd49578c99079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:16Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:16 crc kubenswrapper[4753]: I0129 12:07:16.118676 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-07 
13:58:45.36044309 +0000 UTC Jan 29 12:07:16 crc kubenswrapper[4753]: I0129 12:07:16.127408 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0310995-a7c7-47c3-ae6c-05daaaba92a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e56ac830346c0f85227bcebc9b76c2af8646767d4678934985d7b1f5846dcb82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8391a68625fc3d8fdbc4f16b0b7b25e359798d08ce65ad8f482722910873418a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7c24x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: 
failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:16Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:16 crc kubenswrapper[4753]: I0129 12:07:16.150383 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rnbz9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b372210b-6e1b-4a80-b379-7c1d570712f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://346c36936603f4535d5ad3e6b60e451b05b4d376e2531c813f0c900c92caf5fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqzp6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"
hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rnbz9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:16Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:16 crc kubenswrapper[4753]: I0129 12:07:16.174169 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b108c546-a8c1-4bf9-814b-bfb5871b3013\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9842d9d6fed8c87bc20835964d3271b0be3117fedc658e6ef028db422d1ad478\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4fd4169036d3a646c9e5e4bfa8d86a92c557c3d8f77608ae5308dd0b8c45517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b4a6bbb431e321e54013c96e3cca1537283cbdf65d89a978780550d603663fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed0828
7faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://31bd7db7bddd3e16cb455f7af9fd3a9bca49919c8f1ffb676dec83c543ba7763\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:16Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:16 crc kubenswrapper[4753]: I0129 12:07:16.197306 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The 
container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:16Z is after 2025-08-24T17:21:41Z"
Jan 29 12:07:16 crc kubenswrapper[4753]: I0129 12:07:16.204114 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:16 crc kubenswrapper[4753]: I0129 12:07:16.204277 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:16 crc kubenswrapper[4753]: I0129 12:07:16.204356 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:16 crc kubenswrapper[4753]: I0129 12:07:16.204501 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:16 crc kubenswrapper[4753]: I0129 12:07:16.204573 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:16Z","lastTransitionTime":"2026-01-29T12:07:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:16 crc kubenswrapper[4753]: I0129 12:07:16.309928 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:16 crc kubenswrapper[4753]: I0129 12:07:16.310029 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:16 crc kubenswrapper[4753]: I0129 12:07:16.310056 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:16 crc kubenswrapper[4753]: I0129 12:07:16.310104 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:16 crc kubenswrapper[4753]: I0129 12:07:16.310128 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:16Z","lastTransitionTime":"2026-01-29T12:07:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:17 crc kubenswrapper[4753]: I0129 12:07:17.064157 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:17 crc kubenswrapper[4753]: I0129 12:07:17.064184 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:17 crc kubenswrapper[4753]: I0129 12:07:17.064194 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:17 crc kubenswrapper[4753]: I0129 12:07:17.064209 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:17 crc kubenswrapper[4753]: I0129 12:07:17.064218 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:17Z","lastTransitionTime":"2026-01-29T12:07:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:17 crc kubenswrapper[4753]: I0129 12:07:17.064633 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 29 12:07:17 crc kubenswrapper[4753]: E0129 12:07:17.064715 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 12:07:17 crc kubenswrapper[4753]: I0129 12:07:17.087844 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vwcjk" event={"ID":"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1","Type":"ContainerStarted","Data":"0ed771b481178d6e12c669e122932ef25e1fe09db24c8f025f50a469400af184"} Jan 29 12:07:17 crc kubenswrapper[4753]: I0129 12:07:17.111362 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"703a6b9d-a15d-4ed3-b00b-db7bd5d42c61\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cab3ab80a8fddbc591cd07fdbed800f525bb8a1b5505bf818ebb59cbcce73003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7af8be0836a8ff7d08ff48d9487acdedd4b83127538d071d13b3215400c0f8de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d57a9cede640bdcec325175598b72dd00398436b730248728c7d9afd545b1e\\\",
\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e86597c91848f265cd70410d57ac60eadba1d0322b9b897097366a41c93d8134\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T12:06:54Z\\\",\\\"message\\\":\\\"ed a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\"\\\\nI0129 12:06:54.190978 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 12:06:54.198044 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 12:06:54.198078 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 12:06:54.198139 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 12:06:54.198147 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 12:06:54.208089 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 12:06:54.208122 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208128 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208132 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 12:06:54.208135 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 12:06:54.208139 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 12:06:54.208143 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 12:06:54.208164 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0129 12:06:54.212380 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769688397\\\\\\\\\\\\\\\" (2026-01-29 12:06:37 +0000 UTC to 2026-02-28 12:06:38 +0000 UTC (now=2026-01-29 12:06:54.212317535 +0000 UTC))\\\\\\\"\\\\nF0129 12:06:54.212498 1 cmd.go:182] pods 
\\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e02d977e34888952623f4ced5e073d7bf1ff360f8deb435dc577d2ecf146cb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:33Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:17Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:17 crc kubenswrapper[4753]: I0129 12:07:17.119952 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-10 12:23:21.150354716 +0000 UTC Jan 29 12:07:17 crc kubenswrapper[4753]: I0129 12:07:17.128410 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:17Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:17 crc kubenswrapper[4753]: I0129 12:07:17.142988 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x8kzr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d753320a-61c2-4c0e-bd48-96d74b352114\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c7a667b7243fc36ff18cb4c017152fff3d5f4eb5e28e306ace44420cd722c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tgns4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x8kzr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:17Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:17 crc kubenswrapper[4753]: I0129 12:07:17.161918 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vwcjk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:05Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8b1c7640bd4881ba79f3471e309b2d1538aea8d0c8e01d3d7cfc3677d95725a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8b1c7640bd4881ba79f3471e309b2d1538aea8d0c8e01d3d7cfc3677d95725a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0ba51b6e2180da460faa3599850175db9d7410880ba0214425fa275803f36d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0ba51b6e2180da460faa3599850175db9d7410880ba0214425fa275803f36d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ed771b481178d6e12c669e122932ef25e1fe09db24c8f025f50a469400af184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9
8100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vwcjk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:17Z is after 2025-08-24T17:21:41Z"
Jan 29 12:07:17 crc kubenswrapper[4753]: I0129 12:07:17.168912 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:17 crc kubenswrapper[4753]: I0129 12:07:17.168963 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:17 crc kubenswrapper[4753]: I0129 12:07:17.168974 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:17 crc kubenswrapper[4753]: I0129 12:07:17.168989 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:17 crc kubenswrapper[4753]: I0129 12:07:17.168998 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:17Z","lastTransitionTime":"2026-01-29T12:07:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:17 crc kubenswrapper[4753]: I0129 12:07:17.180288 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rnbz9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b372210b-6e1b-4a80-b379-7c1d570712f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://346c36936603f4535d5ad3e6b60e451b05b4d376e2531c813f0c900c92caf5fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqzp6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rnbz9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:17Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:17 crc kubenswrapper[4753]: I0129 12:07:17.196629 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b108c546-a8c1-4bf9-814b-bfb5871b3013\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9842d9d6fed8c87bc20835964d3271b0be3117fedc658e6ef028db422d1ad478\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4fd4169036d3a646c9e5e4bfa8d86a92c557c3d8f77608ae5308dd0b8c45517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b4a6bbb431e321e54013c96e3cca1537283cbdf65d89a978780550d603663fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-oper
ator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://31bd7db7bddd3e16cb455f7af9fd3a9bca49919c8f1ffb676dec83c543ba7763\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:17Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:17 crc kubenswrapper[4753]: I0129 12:07:17.213242 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:17Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:17 crc kubenswrapper[4753]: I0129 12:07:17.229621 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:17Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:17 crc kubenswrapper[4753]: I0129 12:07:17.249259 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966eb8e59f82bacf20ebfa1b1d5bf9dc0a9cdb3b430ae83ed9bde106eb3fa521\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea7c92ec02a9cf6ec5fbd8efe22a066dfef9b48f042137964c5dd49578c99079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:17Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:17 crc kubenswrapper[4753]: I0129 12:07:17.263751 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0310995-a7c7-47c3-ae6c-05daaaba92a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e56ac830346c0f85227bcebc9b76c2af8646767d4678934985d7b1f5846dcb82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8391a68625fc3d8fdbc4f16b0b7b25e359798d08ce65ad8f482722910873418a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernete
s.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7c24x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:17Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:17 crc kubenswrapper[4753]: I0129 12:07:17.271584 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:17 crc kubenswrapper[4753]: I0129 12:07:17.271650 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:17 crc kubenswrapper[4753]: I0129 12:07:17.271669 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:17 crc kubenswrapper[4753]: I0129 12:07:17.271693 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:17 crc kubenswrapper[4753]: I0129 12:07:17.271708 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:17Z","lastTransitionTime":"2026-01-29T12:07:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:17 crc kubenswrapper[4753]: I0129 12:07:17.285838 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d7ccf10988fd83d31d42be6f82403067a0cc50313c4d5dcddfb528507efffa4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:17Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:17 crc kubenswrapper[4753]: I0129 12:07:17.308647 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80bec2ab-0a88-4818-9339-760edda3b07e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f15016e29f1fc14f51b4d01e772071333fd8a885cacc1804b3a03dbcfc93836\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e6a7d8df812546f17b3e026005c5062086e97e3fa364b3fa22cefec025e4eba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f71859413676dab4a435d43c11ad7cba315a12de744c3c8b161273f6ad36580c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0ec2a21ec299b5eed363668f39a270a8b03ea79b9de3dd23bda0c30579c3de3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91c1df8b6763320498420463a3841f0bfa4fcf753c355f549c3c1e93bf5206a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://05d1e99596530f467ecdae51aec8db191ac045cd85e121fc9835e2f3688f6ae7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34d19c3c88e356688bc3f0dbe54c1a46ef5e5f5fedbf84c5cb536b3c272decb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e40e5b13dfdc135ea09253d466bacdfc5c2ac3d28eabc76d833d71ac300905c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPat
h\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-nzkvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:17Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:17 crc kubenswrapper[4753]: I0129 12:07:17.328765 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7962d88e183df5c4d91f5767f2007068ea2fdfd5c31d9d6ed609c2ab9ca26999\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:17Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:17 crc kubenswrapper[4753]: I0129 12:07:17.347418 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5m2jf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0203345d-1e9f-4cfe-bde7-90f87221d1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fdf3d79084d3e17b1f8cb0b285fd425280f34724c6a441104a2ec52b5217bfdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zzwn6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5m2jf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:17Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:17 crc kubenswrapper[4753]: I0129 12:07:17.375702 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:17 crc kubenswrapper[4753]: I0129 12:07:17.375750 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:17 crc kubenswrapper[4753]: I0129 12:07:17.375761 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:17 crc kubenswrapper[4753]: I0129 12:07:17.375780 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:17 crc kubenswrapper[4753]: I0129 12:07:17.375811 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:17Z","lastTransitionTime":"2026-01-29T12:07:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:17 crc kubenswrapper[4753]: I0129 12:07:17.480915 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:17 crc kubenswrapper[4753]: I0129 12:07:17.480991 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:17 crc kubenswrapper[4753]: I0129 12:07:17.481026 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:17 crc kubenswrapper[4753]: I0129 12:07:17.481074 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:17 crc kubenswrapper[4753]: I0129 12:07:17.481116 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:17Z","lastTransitionTime":"2026-01-29T12:07:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:17 crc kubenswrapper[4753]: I0129 12:07:17.585125 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:17 crc kubenswrapper[4753]: I0129 12:07:17.585182 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:17 crc kubenswrapper[4753]: I0129 12:07:17.585195 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:17 crc kubenswrapper[4753]: I0129 12:07:17.585216 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:17 crc kubenswrapper[4753]: I0129 12:07:17.585256 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:17Z","lastTransitionTime":"2026-01-29T12:07:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:19 crc kubenswrapper[4753]: I0129 12:07:19.024662 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-14 15:02:00.304591359 +0000 UTC Jan 29 12:07:19 crc kubenswrapper[4753]: I0129 12:07:19.046304 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 12:07:19 crc kubenswrapper[4753]: E0129 12:07:19.046562 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 12:07:19 crc kubenswrapper[4753]: I0129 12:07:19.046989 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 12:07:19 crc kubenswrapper[4753]: E0129 12:07:19.048542 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 12:07:19 crc kubenswrapper[4753]: I0129 12:07:19.057294 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 12:07:19 crc kubenswrapper[4753]: E0129 12:07:19.057463 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 12:07:19 crc kubenswrapper[4753]: I0129 12:07:19.079448 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966eb8e59f82bacf20ebfa1b1d5bf9dc0a9cdb3b430ae83ed9bde106eb3fa521\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea7c92ec02a9cf6ec5fbd8efe22a066dfef9b48f042137964c5dd49578c99079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"rea
dy\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:19Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:19 crc kubenswrapper[4753]: I0129 12:07:19.079815 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:19 crc kubenswrapper[4753]: I0129 12:07:19.079896 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:19 crc kubenswrapper[4753]: I0129 12:07:19.081402 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:19 crc kubenswrapper[4753]: I0129 12:07:19.081444 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:19 crc kubenswrapper[4753]: I0129 12:07:19.081457 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:19Z","lastTransitionTime":"2026-01-29T12:07:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:19 crc kubenswrapper[4753]: I0129 12:07:19.094075 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0310995-a7c7-47c3-ae6c-05daaaba92a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e56ac830346c0f85227bcebc9b76c2af8646767d4678934985d7b1f5846dcb82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8391a68625fc3d8fdbc4f16b0b7b25e359798d08ce65ad8f482722910873418a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7c24x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:19Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:19 crc kubenswrapper[4753]: I0129 12:07:19.167451 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rnbz9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b372210b-6e1b-4a80-b379-7c1d570712f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://346c36936603f4535d5ad3e6b60e451b05b4d376e2531c813f0c900c92caf5fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqzp6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\
\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rnbz9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:19Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:19 crc kubenswrapper[4753]: I0129 12:07:19.189060 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:19 crc kubenswrapper[4753]: I0129 12:07:19.189139 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:19 crc kubenswrapper[4753]: I0129 12:07:19.189152 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:19 crc kubenswrapper[4753]: I0129 12:07:19.189174 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:19 crc kubenswrapper[4753]: I0129 12:07:19.189189 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:19Z","lastTransitionTime":"2026-01-29T12:07:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:19 crc kubenswrapper[4753]: I0129 12:07:19.191591 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b108c546-a8c1-4bf9-814b-bfb5871b3013\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9842d9d6fed8c87bc20835964d3271b0be3117fedc658e6ef028db422d1ad478\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4fd4169036d3a646c9e5e4bfa8d86a92c557c3d8f77608ae5308dd0b8c45517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b4a6bbb431e321e54013c96e3cca1537283cbdf65d89a978780550d603663fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://31bd7db7bddd3e16cb455f7af9fd3a9bca49919c8f1ffb676dec83c543ba7763\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:19Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:19 crc kubenswrapper[4753]: I0129 12:07:19.213598 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:19Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:19 crc kubenswrapper[4753]: I0129 12:07:19.230273 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:19Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:19 crc kubenswrapper[4753]: I0129 12:07:19.253993 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d7ccf10988fd83d31d42be6f82403067a0cc50313c4d5dcddfb528507efffa4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:19Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:19 crc kubenswrapper[4753]: I0129 12:07:19.285932 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"80bec2ab-0a88-4818-9339-760edda3b07e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f15016e29f1fc14f51b4d01e772071333fd8a885cacc1804b3a03dbcfc93836\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e6a7d8df812546f17b3e026005c5062086e97e3fa364b3fa22cefec025e4eba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f71859413676dab4a435d43c11ad7cba315a12de744c3c8b161273f6ad36580c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0ec2a21ec299b5eed363668f39a270a8b03ea79b9de3dd23bda0c30579c3de3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91c1df8b6763320498420463a3841f0bfa4fcf753c355f549c3c1e93bf5206a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://05d1e99596530f467ecdae51aec8db191ac045cd85e121fc9835e2f3688f6ae7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34d19c3c88e356688bc3f0dbe54c1a46ef5e5f5fedbf84c5cb536b3c272decb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"D
isabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e40e5b13dfdc135ea09253d466bacdfc5c2ac3d28eabc76d833d71ac300905c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-nzkvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:19Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:19 crc kubenswrapper[4753]: I0129 12:07:19.293604 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:19 crc kubenswrapper[4753]: I0129 12:07:19.293686 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:19 crc kubenswrapper[4753]: I0129 12:07:19.293698 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:19 crc kubenswrapper[4753]: I0129 12:07:19.293720 4753 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeNotReady" Jan 29 12:07:19 crc kubenswrapper[4753]: I0129 12:07:19.293732 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:19Z","lastTransitionTime":"2026-01-29T12:07:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:19 crc kubenswrapper[4753]: I0129 12:07:19.304882 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7962d88e183df5c4d91f5767f2007068ea2fdfd5c31d9d6ed609c2ab9ca26999\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:19Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:19 crc kubenswrapper[4753]: I0129 12:07:19.327679 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5m2jf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0203345d-1e9f-4cfe-bde7-90f87221d1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fdf3d79084d3e17b1f8cb0b285fd425280f34724c6a441104a2ec52b5217bfdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zzwn6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5m2jf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:19Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:19 crc kubenswrapper[4753]: I0129 12:07:19.355105 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vwcjk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:05Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8b1c7640bd4881ba79f3471e309b2d1538aea8d0c8e01d3d7cfc3677d95725a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8b1c7640bd4881ba79f3471e309b2d1538aea8d0c8e01d3d7cfc3677d95725a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0ba51b6e2180da460faa3599850175db9d7410880ba0214425fa275803f36d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0ba51b6e2180da460faa3599850175db9d7410880ba0214425fa275803f36d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ed771b481178d6e12c669e122932ef25e1fe09db24c8f025f50a469400af184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9
8100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vwcjk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:19Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:19 crc kubenswrapper[4753]: I0129 12:07:19.374042 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"703a6b9d-a15d-4ed3-b00b-db7bd5d42c61\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cab3ab80a8fddbc591cd07fdbed800f525bb8a1b5505bf818ebb59cbcce73003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7af8be0836a8ff7d08ff48d9487acdedd4b83127538d071d13b3215400c0f8de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d57a9cede640bdcec325175598b72dd00398436b730248728c7d9afd545b1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e86597c91848f265cd70410d57ac60eadba1d0322b9b897097366a41c93d8134\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T12:06:54Z\\\",\\\"message\\\":\\\"ed a new cert/key pair\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\"\\\\nI0129 12:06:54.190978 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 12:06:54.198044 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 12:06:54.198078 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 12:06:54.198139 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 12:06:54.198147 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 12:06:54.208089 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 12:06:54.208122 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208128 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208132 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 12:06:54.208135 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 12:06:54.208139 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 12:06:54.208143 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 12:06:54.208164 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0129 12:06:54.212380 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769688397\\\\\\\\\\\\\\\" (2026-01-29 12:06:37 +0000 UTC to 2026-02-28 12:06:38 +0000 UTC (now=2026-01-29 12:06:54.212317535 +0000 UTC))\\\\\\\"\\\\nF0129 12:06:54.212498 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e02d977e34888952623f4ced5e073d7bf1ff360f8deb435dc577d2ecf146cb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:33Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:19Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:19 crc kubenswrapper[4753]: I0129 12:07:19.391839 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:19Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:19 crc kubenswrapper[4753]: I0129 12:07:19.403063 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:19 crc kubenswrapper[4753]: I0129 12:07:19.403157 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:19 crc kubenswrapper[4753]: I0129 12:07:19.403186 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:19 crc kubenswrapper[4753]: I0129 12:07:19.403216 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:19 crc kubenswrapper[4753]: I0129 12:07:19.403258 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:19Z","lastTransitionTime":"2026-01-29T12:07:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:19 crc kubenswrapper[4753]: I0129 12:07:19.411526 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x8kzr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d753320a-61c2-4c0e-bd48-96d74b352114\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c7a667b7243fc36ff18cb4c017152fff3d5f4eb5e28e306ace44420cd722c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tgns4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x8kzr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:19Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:19 crc kubenswrapper[4753]: I0129 12:07:19.507630 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:19 crc kubenswrapper[4753]: I0129 12:07:19.507692 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:19 crc kubenswrapper[4753]: I0129 12:07:19.507706 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:19 crc kubenswrapper[4753]: I0129 12:07:19.507729 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:19 crc kubenswrapper[4753]: I0129 12:07:19.507742 4753 setters.go:603] "Node became not 
ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:19Z","lastTransitionTime":"2026-01-29T12:07:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:19 crc kubenswrapper[4753]: I0129 12:07:19.610762 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:19 crc kubenswrapper[4753]: I0129 12:07:19.610818 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:19 crc kubenswrapper[4753]: I0129 12:07:19.610828 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:19 crc kubenswrapper[4753]: I0129 12:07:19.610846 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:19 crc kubenswrapper[4753]: I0129 12:07:19.610856 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:19Z","lastTransitionTime":"2026-01-29T12:07:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:19 crc kubenswrapper[4753]: I0129 12:07:19.713868 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:19 crc kubenswrapper[4753]: I0129 12:07:19.713920 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:19 crc kubenswrapper[4753]: I0129 12:07:19.713937 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:19 crc kubenswrapper[4753]: I0129 12:07:19.713960 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:19 crc kubenswrapper[4753]: I0129 12:07:19.713971 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:19Z","lastTransitionTime":"2026-01-29T12:07:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:19 crc kubenswrapper[4753]: I0129 12:07:19.819069 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:19 crc kubenswrapper[4753]: I0129 12:07:19.819137 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:19 crc kubenswrapper[4753]: I0129 12:07:19.819155 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:19 crc kubenswrapper[4753]: I0129 12:07:19.819185 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:19 crc kubenswrapper[4753]: I0129 12:07:19.819206 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:19Z","lastTransitionTime":"2026-01-29T12:07:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:20 crc kubenswrapper[4753]: I0129 12:07:20.024041 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:20 crc kubenswrapper[4753]: I0129 12:07:20.024246 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:20 crc kubenswrapper[4753]: I0129 12:07:20.024264 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:20 crc kubenswrapper[4753]: I0129 12:07:20.024302 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:20 crc kubenswrapper[4753]: I0129 12:07:20.024314 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:20Z","lastTransitionTime":"2026-01-29T12:07:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:20 crc kubenswrapper[4753]: I0129 12:07:20.040378 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-07 12:25:56.423711695 +0000 UTC Jan 29 12:07:20 crc kubenswrapper[4753]: I0129 12:07:20.128648 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:20 crc kubenswrapper[4753]: I0129 12:07:20.128717 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:20 crc kubenswrapper[4753]: I0129 12:07:20.128727 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:20 crc kubenswrapper[4753]: I0129 12:07:20.128746 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:20 crc kubenswrapper[4753]: I0129 12:07:20.128756 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:20Z","lastTransitionTime":"2026-01-29T12:07:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:20 crc kubenswrapper[4753]: I0129 12:07:20.232918 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:20 crc kubenswrapper[4753]: I0129 12:07:20.232998 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:20 crc kubenswrapper[4753]: I0129 12:07:20.233035 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:20 crc kubenswrapper[4753]: I0129 12:07:20.233086 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:20 crc kubenswrapper[4753]: I0129 12:07:20.233114 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:20Z","lastTransitionTime":"2026-01-29T12:07:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:20 crc kubenswrapper[4753]: I0129 12:07:20.337595 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:20 crc kubenswrapper[4753]: I0129 12:07:20.337662 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:20 crc kubenswrapper[4753]: I0129 12:07:20.337674 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:20 crc kubenswrapper[4753]: I0129 12:07:20.337700 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:20 crc kubenswrapper[4753]: I0129 12:07:20.337713 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:20Z","lastTransitionTime":"2026-01-29T12:07:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:20 crc kubenswrapper[4753]: I0129 12:07:20.441177 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:20 crc kubenswrapper[4753]: I0129 12:07:20.441218 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:20 crc kubenswrapper[4753]: I0129 12:07:20.441248 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:20 crc kubenswrapper[4753]: I0129 12:07:20.441279 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:20 crc kubenswrapper[4753]: I0129 12:07:20.441297 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:20Z","lastTransitionTime":"2026-01-29T12:07:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:20 crc kubenswrapper[4753]: I0129 12:07:20.543285 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:20 crc kubenswrapper[4753]: I0129 12:07:20.543361 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:20 crc kubenswrapper[4753]: I0129 12:07:20.543373 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:20 crc kubenswrapper[4753]: I0129 12:07:20.543393 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:20 crc kubenswrapper[4753]: I0129 12:07:20.543407 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:20Z","lastTransitionTime":"2026-01-29T12:07:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:20 crc kubenswrapper[4753]: I0129 12:07:20.673032 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:20 crc kubenswrapper[4753]: I0129 12:07:20.673082 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:20 crc kubenswrapper[4753]: I0129 12:07:20.673092 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:20 crc kubenswrapper[4753]: I0129 12:07:20.673111 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:20 crc kubenswrapper[4753]: I0129 12:07:20.673120 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:20Z","lastTransitionTime":"2026-01-29T12:07:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:20 crc kubenswrapper[4753]: I0129 12:07:20.776463 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:20 crc kubenswrapper[4753]: I0129 12:07:20.776540 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:20 crc kubenswrapper[4753]: I0129 12:07:20.776552 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:20 crc kubenswrapper[4753]: I0129 12:07:20.776574 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:20 crc kubenswrapper[4753]: I0129 12:07:20.776588 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:20Z","lastTransitionTime":"2026-01-29T12:07:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:20 crc kubenswrapper[4753]: I0129 12:07:20.879646 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:20 crc kubenswrapper[4753]: I0129 12:07:20.879707 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:20 crc kubenswrapper[4753]: I0129 12:07:20.879718 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:20 crc kubenswrapper[4753]: I0129 12:07:20.879737 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:20 crc kubenswrapper[4753]: I0129 12:07:20.879755 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:20Z","lastTransitionTime":"2026-01-29T12:07:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:20 crc kubenswrapper[4753]: I0129 12:07:20.887987 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 12:07:20 crc kubenswrapper[4753]: I0129 12:07:20.887987 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 12:07:20 crc kubenswrapper[4753]: E0129 12:07:20.888189 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 12:07:20 crc kubenswrapper[4753]: I0129 12:07:20.888142 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 12:07:20 crc kubenswrapper[4753]: E0129 12:07:20.888310 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 12:07:20 crc kubenswrapper[4753]: E0129 12:07:20.888517 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 12:07:20 crc kubenswrapper[4753]: I0129 12:07:20.961263 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 12:07:20 crc kubenswrapper[4753]: I0129 12:07:20.985980 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:20 crc kubenswrapper[4753]: I0129 12:07:20.986022 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:20 crc kubenswrapper[4753]: I0129 12:07:20.986032 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:20 crc kubenswrapper[4753]: I0129 12:07:20.986050 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:20 crc kubenswrapper[4753]: I0129 12:07:20.986063 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:20Z","lastTransitionTime":"2026-01-29T12:07:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.040856 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-01 07:55:46.973907331 +0000 UTC Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.082060 4753 generic.go:334] "Generic (PLEG): container finished" podID="a99ab890-0a5a-4abb-86fb-a3731ff6b2c1" containerID="0ed771b481178d6e12c669e122932ef25e1fe09db24c8f025f50a469400af184" exitCode=0 Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.082179 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vwcjk" event={"ID":"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1","Type":"ContainerDied","Data":"0ed771b481178d6e12c669e122932ef25e1fe09db24c8f025f50a469400af184"} Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.090270 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9dtck"] Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.091168 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9dtck" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.095500 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.095742 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.099336 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.099406 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.099427 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.099456 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.099469 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:21Z","lastTransitionTime":"2026-01-29T12:07:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.114646 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80bec2ab-0a88-4818-9339-760edda3b07e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f15016e29f1fc14f51b4d01e772071333fd8a885cacc1804b3a03dbcfc93836\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e6a7d8df812546f17b3e026005c5062086e97e3fa364b3fa22cefec025e4eba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://f71859413676dab4a435d43c11ad7cba315a12de744c3c8b161273f6ad36580c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0ec2a21ec299b5eed363668f39a270a8b03ea79b9de3dd23bda0c30579c3de3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91c1df8b6763320498420463a3841f0bfa4fcf753c355f549c3c1e93bf5206a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://05d1e99596530f467ecdae51aec8db191ac045cd85e121fc9835e2f3688f6ae7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34d19c3c88e356688bc3f0dbe54c1a46ef5e5f5fedbf84c5cb536b3c272decb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\
"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e40e5b13dfdc135ea09253d466bacdfc5c2ac3d28eabc76d833d71ac300905c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-nzkvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:21Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.131339 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d7ccf10988fd83d31d42be6f82403067a0cc50313c4d5dcddfb528507efffa4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:21Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.149285 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7962d88e183df5c4d91f5767f2007068ea2fdfd5c31d9d6ed609c2ab9ca26999\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:21Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.163569 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5m2jf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0203345d-1e9f-4cfe-bde7-90f87221d1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fdf3d79084d3e17b1f8cb0b285fd425280f34724c6a441104a2ec52b5217bfdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zzwn6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5m2jf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:21Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.176067 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/132de771-c8b4-403f-8916-8b453e7c6fc3-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-9dtck\" (UID: \"132de771-c8b4-403f-8916-8b453e7c6fc3\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9dtck" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.176184 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/132de771-c8b4-403f-8916-8b453e7c6fc3-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-9dtck\" (UID: \"132de771-c8b4-403f-8916-8b453e7c6fc3\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9dtck" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.176435 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"env-overrides\" (UniqueName: \"kubernetes.io/configmap/132de771-c8b4-403f-8916-8b453e7c6fc3-env-overrides\") pod \"ovnkube-control-plane-749d76644c-9dtck\" (UID: \"132de771-c8b4-403f-8916-8b453e7c6fc3\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9dtck" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.176774 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gvjpd\" (UniqueName: \"kubernetes.io/projected/132de771-c8b4-403f-8916-8b453e7c6fc3-kube-api-access-gvjpd\") pod \"ovnkube-control-plane-749d76644c-9dtck\" (UID: \"132de771-c8b4-403f-8916-8b453e7c6fc3\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9dtck" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.181657 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"703a6b9d-a15d-4ed3-b00b-db7bd5d42c61\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cab3ab80a8fddbc591cd07fdbed800f525bb8a1b5505bf818ebb59cbcce73003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7af8be0836a8ff7d08ff48d9487acdedd4b83127538d071d13b3215400c0f8de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"r
unning\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d57a9cede640bdcec325175598b72dd00398436b730248728c7d9afd545b1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e86597c91848f265cd70410d57ac60eadba1d0322b9b897097366a41c93d8134\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T12:06:54Z\\\",\\\"message\\\":\\\"ed a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\"\\\\nI0129 12:06:54.190978 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 12:06:54.198044 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 12:06:54.198078 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 12:06:54.198139 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 12:06:54.198147 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 12:06:54.208089 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 12:06:54.208122 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208128 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208132 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 12:06:54.208135 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 12:06:54.208139 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 12:06:54.208143 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 12:06:54.208164 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0129 12:06:54.212380 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" 
[serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769688397\\\\\\\\\\\\\\\" (2026-01-29 12:06:37 +0000 UTC to 2026-02-28 12:06:38 +0000 UTC (now=2026-01-29 12:06:54.212317535 +0000 UTC))\\\\\\\"\\\\nF0129 12:06:54.212498 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e02d977e34888952623f4ced5e073d7bf1ff360f8deb435dc577d2ecf146cb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:33Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:21Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.200977 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
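[editor's note] The certificate_manager.go entries earlier in this capture pair a fixed expiration (2026-02-24 05:53:03 UTC) with a rotation deadline that changes between log lines (2025-12-07, then 2025-12-01). That is consistent with client-go's certificate manager recomputing a jittered deadline somewhere in roughly the final 10-30% of the certificate's validity window. A minimal sketch of that computation, under stated assumptions: the 70-90% band mirrors client-go's documented behavior, and NotBefore is assumed (the log only prints the expiration):

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// rotationDeadline picks a random point in the 70-90% region of the
// certificate lifetime, approximating client-go's jittered rotation.
func rotationDeadline(notBefore, notAfter time.Time) time.Time {
	total := notAfter.Sub(notBefore)
	jittered := time.Duration(float64(total) * (0.7 + 0.2*rand.Float64()))
	return notBefore.Add(jittered)
}

func main() {
	// Expiration copied from the log; NotBefore is a hypothetical
	// one-year lifetime, since the log does not print it.
	notAfter, _ := time.Parse("2006-01-02 15:04:05 -0700 MST", "2026-02-24 05:53:03 +0000 UTC")
	notBefore := notAfter.AddDate(-1, 0, 0)
	fmt.Println("rotation deadline:", rotationDeadline(notBefore, notAfter))
}
```

Each run lands on a different deadline inside the band, which is why consecutive log lines report different rotation deadlines for the same certificate.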
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:21Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.205262 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.205447 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.205468 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.205493 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.205505 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:21Z","lastTransitionTime":"2026-01-29T12:07:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.226212 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x8kzr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d753320a-61c2-4c0e-bd48-96d74b352114\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c7a667b7243fc36ff18cb4c017152fff3d5f4eb5e28e306ace44420cd722c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tgns4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x8kzr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:21Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.246143 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vwcjk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8b1c7640bd4881ba79f3471e309b2d1538aea8d0c8e01d3d7cfc3677d95725a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8b1c7640bd4881ba79f3471e309b2d1538aea8d0c8e01d3d7cfc3677d95725a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0ba51b6e2180da460faa3599850175db9d7410880ba0214425fa275803f36d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0ba51b6e2180da460faa3599850175db9d7410880ba0214425fa275803f36d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mo
untPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ed771b481178d6e12c669e122932ef25e1fe09db24c8f025f50a469400af184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ed771b481178d6e12c669e122932ef25e1fe09db24c8f025f50a469400af184\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vwcjk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:21Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.263481 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b108c546-a8c1-4bf9-814b-bfb5871b3013\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9842d9d6fed8c87bc20835964d3271b0be3117fedc658e6ef028db422d1ad478\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4fd4169036d3a646c9e5e4bfa8d86a92c557c3d8f77608ae5308dd0b8c45517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b4a6bbb431e321e54013c96e3cca1537283cbdf65d89a978780550d603663fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://31bd7db7bddd3e16cb455f7af9fd3a9bca49919c8f1ffb676dec83c543ba7763\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:21Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.278219 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/132de771-c8b4-403f-8916-8b453e7c6fc3-env-overrides\") pod \"ovnkube-control-plane-749d76644c-9dtck\" (UID: \"132de771-c8b4-403f-8916-8b453e7c6fc3\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9dtck" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.278280 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gvjpd\" (UniqueName: \"kubernetes.io/projected/132de771-c8b4-403f-8916-8b453e7c6fc3-kube-api-access-gvjpd\") pod \"ovnkube-control-plane-749d76644c-9dtck\" (UID: \"132de771-c8b4-403f-8916-8b453e7c6fc3\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9dtck" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.278305 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/132de771-c8b4-403f-8916-8b453e7c6fc3-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-9dtck\" (UID: \"132de771-c8b4-403f-8916-8b453e7c6fc3\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9dtck" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.278331 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/132de771-c8b4-403f-8916-8b453e7c6fc3-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-9dtck\" (UID: \"132de771-c8b4-403f-8916-8b453e7c6fc3\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9dtck" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.279118 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/132de771-c8b4-403f-8916-8b453e7c6fc3-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-9dtck\" (UID: \"132de771-c8b4-403f-8916-8b453e7c6fc3\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9dtck" Jan 29 12:07:21 crc kubenswrapper[4753]: 
I0129 12:07:21.279158 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/132de771-c8b4-403f-8916-8b453e7c6fc3-env-overrides\") pod \"ovnkube-control-plane-749d76644c-9dtck\" (UID: \"132de771-c8b4-403f-8916-8b453e7c6fc3\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9dtck" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.279583 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:21Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.286362 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/132de771-c8b4-403f-8916-8b453e7c6fc3-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-9dtck\" (UID: \"132de771-c8b4-403f-8916-8b453e7c6fc3\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9dtck" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.296043 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:21Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.300073 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gvjpd\" (UniqueName: \"kubernetes.io/projected/132de771-c8b4-403f-8916-8b453e7c6fc3-kube-api-access-gvjpd\") pod \"ovnkube-control-plane-749d76644c-9dtck\" (UID: \"132de771-c8b4-403f-8916-8b453e7c6fc3\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9dtck" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.310427 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.310478 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.310490 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.310509 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.310524 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:21Z","lastTransitionTime":"2026-01-29T12:07:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.323629 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966eb8e59f82bacf20ebfa1b1d5bf9dc0a9cdb3b430ae83ed9bde106eb3fa521\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea7c92ec02a9cf6ec5fbd8efe22a066dfef9b48f042137964c5dd49578c99079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:21Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.481857 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9dtck" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.482346 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0310995-a7c7-47c3-ae6c-05daaaba92a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e56ac830346c0f85227bcebc9b76c2af8646767d4678934985d7b1f5846dcb82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8391a68625fc3d8fdbc4f16b0b7b25e359798d08ce65ad8f482722910873418a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7c24x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:21Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.491614 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.491677 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.491692 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.491713 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.491725 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:21Z","lastTransitionTime":"2026-01-29T12:07:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:21 crc kubenswrapper[4753]: W0129 12:07:21.515885 4753 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod132de771_c8b4_403f_8916_8b453e7c6fc3.slice/crio-e862437d1218abb0ed14e8e7fd433ba2048b092d1727ed8cf4bf7a6adae43577 WatchSource:0}: Error finding container e862437d1218abb0ed14e8e7fd433ba2048b092d1727ed8cf4bf7a6adae43577: Status 404 returned error can't find the container with id e862437d1218abb0ed14e8e7fd433ba2048b092d1727ed8cf4bf7a6adae43577 Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.520584 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rnbz9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b372210b-6e1b-4a80-b379-7c1d570712f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://346c36936603f4535d5ad3e6b60e451b05b4d376e2531c813f0c900c92caf5fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqzp6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rnbz9\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:21Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.546947 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7962d88e183df5c4d91f5767f2007068ea2fdfd5c31d9d6ed609c2ab9ca26999\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:21Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.563704 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5m2jf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0203345d-1e9f-4cfe-bde7-90f87221d1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fdf3d79084d3e17b1f8cb0b285fd425280f34724c6a441104a2ec52b5217bfdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zzwn6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5m2jf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:21Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.584791 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"703a6b9d-a15d-4ed3-b00b-db7bd5d42c61\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cab3ab80a8fddbc591cd07fdbed800f525bb8a1b5505bf818ebb59cbcce73003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7af8be0836a8ff7d08ff48d9487acdedd4b83127538d071d13b3215400c0f8de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d57a9cede640bdcec325175598b72dd00398436b730248728c7d9afd545b1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e86597c91848f265cd70410d57ac60eadba1d0322b9b897097366a41c93d8134\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T12:06:54Z\\\",\\\"message\\\":\\\"ed a new cert/key pair\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\"\\\\nI0129 12:06:54.190978 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 12:06:54.198044 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 12:06:54.198078 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 12:06:54.198139 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 12:06:54.198147 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 12:06:54.208089 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 12:06:54.208122 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208128 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208132 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 12:06:54.208135 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 12:06:54.208139 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 12:06:54.208143 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 12:06:54.208164 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0129 12:06:54.212380 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769688397\\\\\\\\\\\\\\\" (2026-01-29 12:06:37 +0000 UTC to 2026-02-28 12:06:38 +0000 UTC (now=2026-01-29 12:06:54.212317535 +0000 UTC))\\\\\\\"\\\\nF0129 12:06:54.212498 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e02d977e34888952623f4ced5e073d7bf1ff360f8deb435dc577d2ecf146cb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:33Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:21Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.596010 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.596072 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.596085 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.596105 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.596117 4753 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:21Z","lastTransitionTime":"2026-01-29T12:07:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.601363 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:21Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.615642 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x8kzr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d753320a-61c2-4c0e-bd48-96d74b352114\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c7a667b7243fc36ff18cb4c017152fff3d5f4eb5e28e306ace44420cd722c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tgns4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x8kzr\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:21Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.633326 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vwcjk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/
host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8b1c7640bd4881ba79f3471e309b2d1538aea8d0c8e01d3d7cfc3677d95725a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8b1c7640bd4881ba79f3471e309b2d1538aea8d0c8e01d3d7cfc3677d95725a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0ba51b6e2180da460faa3599850175db9d7410880ba0214425fa275803f36d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"sta
rted\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0ba51b6e2180da460faa3599850175db9d7410880ba0214425fa275803f36d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ed771b481178d6e12c669e122932ef25e1fe09db24c8f025f50a469400af184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ed771b481178d6e12c669e122932ef25e1fe09db24c8f025f50a469400af184\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vwcjk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:21Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.651337 4753 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9dtck" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"132de771-c8b4-403f-8916-8b453e7c6fc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gvjpd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gvjpd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-9dtck\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:21Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.671932 4753 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b108c546-a8c1-4bf9-814b-bfb5871b3013\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9842d9d6fed8c87bc20835964d3271b0be3117fedc658e6ef028db422d1ad478\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4fd4169036d3a646c9e5e4bfa8d86a92c557c3d8f77608ae5308dd0b8c45517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b4a6bbb431e321e54013c96e3cca1537283cbdf65d89a978780550d603663fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://31bd7db7bddd3e16cb455f7af9f
d3a9bca49919c8f1ffb676dec83c543ba7763\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:21Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.689701 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:21Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.699160 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.699201 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.699214 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.699281 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.699302 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:21Z","lastTransitionTime":"2026-01-29T12:07:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.706972 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:21Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.729357 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966eb8e59f82bacf20ebfa1b1d5bf9dc0a9cdb3b430ae83ed9bde106eb3fa521\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea7c92ec02a9cf6ec5fbd8efe22a066dfef9b48f042137964c5dd49578c99079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:21Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.744100 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0310995-a7c7-47c3-ae6c-05daaaba92a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e56ac830346c0f85227bcebc9b76c2af8646767d4678934985d7b1f5846dcb82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8391a68625fc3d8fdbc4f16b0b7b25e359798d08ce65ad8f482722910873418a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7c24x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:21Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.763358 4753 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-rnbz9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b372210b-6e1b-4a80-b379-7c1d570712f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://346c36936603f4535d5ad3e6b60e451b05b4d376e2531c813f0c900c92caf5fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqzp6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-rnbz9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:21Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.781452 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d7ccf10988fd83d31d42be6f82403067a0cc50313c4d5dcddfb528507efffa4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:21Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.802139 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.802211 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.802243 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.802265 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.802277 4753 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:21Z","lastTransitionTime":"2026-01-29T12:07:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.813344 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80bec2ab-0a88-4818-9339-760edda3b07e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f15016e29f1fc14f51b4d01e772071333fd8a885cacc1804b3a03dbcfc93836\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e6a7d8df812546f17b3e026005c5062086e97e3fa364b3fa22cefec025e4eba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\
":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f71859413676dab4a435d43c11ad7cba315a12de744c3c8b161273f6ad36580c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0ec2a21ec299b5eed363668f39a270a8b03ea79b9de3dd23bda0c30579c3de3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91c1df8b6763320498420463a3841f0bfa4fcf753c355f549c3c1e93bf5206a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access
-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://05d1e99596530f467ecdae51aec8db191ac045cd85e121fc9835e2f3688f6ae7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34d19c3c88e356688bc3f0dbe54c1a46ef5e5f5fedbf84c5cb536b3c272decb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\
":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e40e5b13dfdc135ea09253d466bacdfc5c2ac3d28eabc76d833d71ac300905c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-nzkvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:21Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 
12:07:21.906155 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.906213 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.906250 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.906276 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:21 crc kubenswrapper[4753]: I0129 12:07:21.906292 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:21Z","lastTransitionTime":"2026-01-29T12:07:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.009933 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.010021 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.010110 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.010139 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.010153 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:22Z","lastTransitionTime":"2026-01-29T12:07:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.041630 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-06 03:26:44.175524514 +0000 UTC Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.093754 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vwcjk" event={"ID":"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1","Type":"ContainerStarted","Data":"1990423fefacfb9b390af88b9af343c1bb4c02014db6bf8226918a7be9f4ad5e"} Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.096016 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9dtck" event={"ID":"132de771-c8b4-403f-8916-8b453e7c6fc3","Type":"ContainerStarted","Data":"e862437d1218abb0ed14e8e7fd433ba2048b092d1727ed8cf4bf7a6adae43577"} Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.113248 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.113279 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.113289 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.113306 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.113315 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:22Z","lastTransitionTime":"2026-01-29T12:07:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.115854 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rnbz9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b372210b-6e1b-4a80-b379-7c1d570712f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://346c36936603f4535d5ad3e6b60e451b05b4d376e2531c813f0c900c92caf5fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqzp6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rnbz9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:22Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.130273 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9dtck" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"132de771-c8b4-403f-8916-8b453e7c6fc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gvjpd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gvjpd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-9dtck\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:22Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.147442 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b108c546-a8c1-4bf9-814b-bfb5871b3013\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9842d9d6fed8c87bc20835964d3271b0be3117fedc658e6ef028db422d1ad478\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4fd4169036d3a646c9e5e4bfa8d86a92c557c3d8f77608ae5308dd0b8c45517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b4a6bbb431e321e54013c96e3cca1537283cbdf65d89a978780550d603663fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://31bd7db7bddd3e16cb455f7af9fd3a9bca49919c8f1ffb676dec83c543ba7763\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:22Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.166509 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:22Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.189216 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:22Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.216308 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966eb8e59f82bacf20ebfa1b1d5bf9dc0a9cdb3b430ae83ed9bde106eb3fa521\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea7c92ec02a9cf6ec5fbd8efe22a066dfef9b48f042137964c5dd49578c99079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:22Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.216604 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.216622 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.216631 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.216648 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.216659 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:22Z","lastTransitionTime":"2026-01-29T12:07:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.233926 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0310995-a7c7-47c3-ae6c-05daaaba92a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e56ac830346c0f85227bcebc9b76c2af8646767d4678934985d7b1f5846dcb82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8391a68625fc3d8fdbc4f16b0b7b25e359798d08ce65ad8f482722910873418a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7c24x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:22Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.250858 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d7ccf10988fd83d31d42be6f82403067a0cc50313c4d5dcddfb528507efffa4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:22Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.377086 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.377140 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.377150 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.377170 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.377181 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:22Z","lastTransitionTime":"2026-01-29T12:07:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false 
reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.411520 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80bec2ab-0a88-4818-9339-760edda3b07e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f15016e29f1fc14f51b4d01e772071333fd8a885cacc1804b3a03dbcfc93836\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e6a7d8df812546f17b3e026005c5062086e97e3fa364b3fa22cefec025e4eba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f71859413676dab4a435d43c11ad7cba315a12de744c3c8b161273f6ad36580c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0ec2a21ec299b5eed363668f39a270a8b03ea79b9de3dd23bda0c30579c3de3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91c1df8b6763320498420463a3841f0bfa4fcf753c355f549c3c1e93bf5206a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://05d1e99596530f467ecdae51aec8db191ac045cd85e121fc9835e2f3688f6ae7\\\",\\\"image\\\":\\\"quay.io/openshift-release
-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34d19c3c88e356688bc3f0dbe54c1a46ef5e5f5fedbf84c5cb536b3c272decb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",
\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e40e5b13dfdc135ea09253d466bacdfc5c2ac3d28eabc76d833d71ac300905c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-nzkvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:22Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.429909 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7962d88e183df5c4d91f5767f2007068ea2fdfd5c31d9d6ed609c2ab9ca26999\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:22Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.443075 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5m2jf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0203345d-1e9f-4cfe-bde7-90f87221d1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fdf3d79084d3e17b1f8cb0b285fd425280f34724c6a441104a2ec52b5217bfdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zzwn6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5m2jf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:22Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.463516 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"703a6b9d-a15d-4ed3-b00b-db7bd5d42c61\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cab3ab80a8fddbc591cd07fdbed800f525bb8a1b5505bf818ebb59cbcce73003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7af8be0836a8ff7d08ff48d9487acdedd4b83127538d071d13b3215400c0f8de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d57a9cede640bdcec325175598b72dd00398436b730248728c7d9afd545b1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e86597c91848f265cd70410d57ac60eadba1d0322b9b897097366a41c93d8134\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T12:06:54Z\\\",\\\"message\\\":\\\"ed a new cert/key pair\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\"\\\\nI0129 12:06:54.190978 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 12:06:54.198044 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 12:06:54.198078 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 12:06:54.198139 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 12:06:54.198147 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 12:06:54.208089 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 12:06:54.208122 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208128 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208132 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 12:06:54.208135 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 12:06:54.208139 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 12:06:54.208143 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 12:06:54.208164 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0129 12:06:54.212380 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769688397\\\\\\\\\\\\\\\" (2026-01-29 12:06:37 +0000 UTC to 2026-02-28 12:06:38 +0000 UTC (now=2026-01-29 12:06:54.212317535 +0000 UTC))\\\\\\\"\\\\nF0129 12:06:54.212498 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e02d977e34888952623f4ced5e073d7bf1ff360f8deb435dc577d2ecf146cb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:33Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:22Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.479730 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.479796 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.479806 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.479828 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.479838 4753 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:22Z","lastTransitionTime":"2026-01-29T12:07:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.484208 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:22Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.628248 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x8kzr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d753320a-61c2-4c0e-bd48-96d74b352114\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c7a667b7243fc36ff18cb4c017152fff3d5f4eb5e28e306ace44420cd722c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tgns4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x8kzr\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:22Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.630983 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.631024 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.631040 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.631068 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.631088 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:22Z","lastTransitionTime":"2026-01-29T12:07:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.649265 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vwcjk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8b1c7640bd4881ba79f3471e309b2d1538aea8d0c8e01d3d7cfc3677d95725a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8b1c7640bd4881ba79f3471e309b2d1538aea8d0c8e01d3d7cfc3677d95725a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0ba51b6e2180da460faa3599850175db9d7410880ba0214425fa275803f36d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0ba51b6e2180da460faa3599850175db9d7410880ba0214425fa275803f36d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ed771b481178d6e12c669e122932ef25e1fe09db24c8f025f50a469400af184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ed771b481178d6e12c669e122932ef25e1fe09db24c8f025f50a469400af184\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1990423fefacfb9b390af88b9af343c1bb4c02014db6bf8226918a7be9f4ad5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vwcjk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:22Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.734453 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.734519 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.734528 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.734547 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.734557 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:22Z","lastTransitionTime":"2026-01-29T12:07:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.747455 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.747522 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.747539 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.747563 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.747575 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:22Z","lastTransitionTime":"2026-01-29T12:07:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:22 crc kubenswrapper[4753]: E0129 12:07:22.765538 4753 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7536a030-b66d-4df9-a1ec-9890f7ad99e7\\\",\\\"systemUUID\\\":\\\"ce89c1ce-259c-4b78-b348-3ef96afb6944\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:22Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.769891 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.769946 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.769959 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.769981 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.769994 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:22Z","lastTransitionTime":"2026-01-29T12:07:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:22 crc kubenswrapper[4753]: E0129 12:07:22.785673 4753 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7536a030-b66d-4df9-a1ec-9890f7ad99e7\\\",\\\"systemUUID\\\":\\\"ce89c1ce-259c-4b78-b348-3ef96afb6944\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:22Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.789899 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.789965 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.789985 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.790008 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.790020 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:22Z","lastTransitionTime":"2026-01-29T12:07:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:22 crc kubenswrapper[4753]: E0129 12:07:22.806403 4753 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7536a030-b66d-4df9-a1ec-9890f7ad99e7\\\",\\\"systemUUID\\\":\\\"ce89c1ce-259c-4b78-b348-3ef96afb6944\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:22Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.811493 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.811549 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.811563 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.811586 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.811599 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:22Z","lastTransitionTime":"2026-01-29T12:07:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.888129 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.888268 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 12:07:22 crc kubenswrapper[4753]: I0129 12:07:22.888445 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 12:07:22 crc kubenswrapper[4753]: E0129 12:07:22.888706 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 12:07:22 crc kubenswrapper[4753]: E0129 12:07:22.889213 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 12:07:22 crc kubenswrapper[4753]: E0129 12:07:22.889929 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.198479 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-17 23:16:01.241906408 +0000 UTC Jan 29 12:07:23 crc kubenswrapper[4753]: E0129 12:07:23.205395 4753 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7536a030-b66d-4df9-a1ec-9890f7ad99e7\\\",\\\"systemUUID\\\":\\\"ce89c1ce-259c-4b78-b348-3ef96afb6944\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:22Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.206173 4753 generic.go:334] "Generic (PLEG): container finished" podID="a99ab890-0a5a-4abb-86fb-a3731ff6b2c1" containerID="1990423fefacfb9b390af88b9af343c1bb4c02014db6bf8226918a7be9f4ad5e" exitCode=0 Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.206247 4753 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vwcjk" event={"ID":"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1","Type":"ContainerDied","Data":"1990423fefacfb9b390af88b9af343c1bb4c02014db6bf8226918a7be9f4ad5e"}
Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.214054 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9dtck" event={"ID":"132de771-c8b4-403f-8916-8b453e7c6fc3","Type":"ContainerStarted","Data":"829af7b1f520503a0f36c60dd12b25d45c3831ba0cbb61f4f0a80582cb46da1e"}
Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.214090 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.214157 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.214175 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.214198 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.214212 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:23Z","lastTransitionTime":"2026-01-29T12:07:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.214112 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9dtck" event={"ID":"132de771-c8b4-403f-8916-8b453e7c6fc3","Type":"ContainerStarted","Data":"30aac9e31bfe76eba96a50a30b03419b0b23ddd888485d609f37b84af28b8d3a"}
Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.233249 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-p6m5g"]
Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.234416 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p6m5g"
Jan 29 12:07:23 crc kubenswrapper[4753]: E0129 12:07:23.241185 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-multus/network-metrics-daemon-p6m5g" podUID="2412fa08-e643-4225-b494-eb999ea93fce" Jan 29 12:07:23 crc kubenswrapper[4753]: E0129 12:07:23.254584 4753 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7536a030-b66d-4df9-a1ec-9890f7ad99e7\\\",\\\"systemUUID\\\":\\\"ce89c1ce-259c-4b78-b348-3ef96afb6944\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:23Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:23 crc kubenswrapper[4753]: E0129 12:07:23.254761 4753 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.255257 4753 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7962d88e183df5c4d91f5767f2007068ea2fdfd5c31d9d6ed609c2ab9ca26999\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:23Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.258873 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.258920 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.258933 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.258957 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.258975 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:23Z","lastTransitionTime":"2026-01-29T12:07:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.281431 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5m2jf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0203345d-1e9f-4cfe-bde7-90f87221d1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fdf3d79084d3e17b1f8cb0b285fd425280f34724c6a441104a2ec52b5217bfdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zzwn6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5m2jf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:23Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.300308 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2412fa08-e643-4225-b494-eb999ea93fce-metrics-certs\") pod \"network-metrics-daemon-p6m5g\" (UID: \"2412fa08-e643-4225-b494-eb999ea93fce\") " pod="openshift-multus/network-metrics-daemon-p6m5g" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.300469 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zjjxc\" (UniqueName: \"kubernetes.io/projected/2412fa08-e643-4225-b494-eb999ea93fce-kube-api-access-zjjxc\") pod \"network-metrics-daemon-p6m5g\" (UID: \"2412fa08-e643-4225-b494-eb999ea93fce\") " pod="openshift-multus/network-metrics-daemon-p6m5g" Jan 29 12:07:23 crc 
kubenswrapper[4753]: I0129 12:07:23.312322 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x8kzr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d753320a-61c2-4c0e-bd48-96d74b352114\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c7a667b7243fc36ff18cb4c017152fff3d5f4eb5e28e306ace44420cd722c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tgns4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x8kzr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:23Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.331325 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vwcjk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8b1c7640bd4881ba79f3471e309b2d1538aea8d0c8e01d3d7cfc3677d95725a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8b1c7640bd4881ba79f3471e309b2d1538aea8d0c8e01d3d7cfc3677d95725a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0ba51b6e2180da460faa3599850175db9d7410880ba0214425fa275803f36d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0ba51b6e2180da460faa3599850175db9d7410880ba0214425fa275803f36d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"D
isabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ed771b481178d6e12c669e122932ef25e1fe09db24c8f025f50a469400af184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ed771b481178d6e12c669e122932ef25e1fe09db24c8f025f50a469400af184\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1990423fefacfb9b390af88b9af343c1bb4c02014db6bf8226918a7be9f4ad5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1990423fefacfb9b390af88b9af343c1bb4c02014db6bf8226918a7be9f4ad5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vwcjk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:23Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.360918 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"703a6b9d-a15d-4ed3-b00b-db7bd5d42c61\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cab3ab80a8fddbc591cd07fdbed800f525bb8a1b5505bf818ebb59cbcce73003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7af8be0836a8ff7d08ff48d9487acdedd4b83127538d071d13b3215400c0f8de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d57a9cede640bdcec325175598b72dd00398436b730248728c7d9afd545b1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e86597c91848f265cd70410d57ac60eadba1d0322b9b897097366a41c93d8134\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T12:06:54Z\\\",\\\"message\\\":\\\"ed a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\"\\\\nI0129 12:06:54.190978 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 12:06:54.198044 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 12:06:54.198078 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 12:06:54.198139 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 12:06:54.198147 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 12:06:54.208089 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 12:06:54.208122 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208128 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208132 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 12:06:54.208135 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 12:06:54.208139 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 12:06:54.208143 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 12:06:54.208164 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0129 12:06:54.212380 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769688397\\\\\\\\\\\\\\\" (2026-01-29 12:06:37 +0000 UTC to 2026-02-28 12:06:38 +0000 UTC (now=2026-01-29 12:06:54.212317535 +0000 UTC))\\\\\\\"\\\\nF0129 12:06:54.212498 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e02d977e34888952623f4ced5e073d7bf1ff360f8deb435dc577d2ecf146cb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:33Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:23Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.362785 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.362814 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.362823 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.362840 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.362848 4753 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:23Z","lastTransitionTime":"2026-01-29T12:07:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.376879 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:23Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.393707 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:23Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.402424 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zjjxc\" (UniqueName: \"kubernetes.io/projected/2412fa08-e643-4225-b494-eb999ea93fce-kube-api-access-zjjxc\") pod \"network-metrics-daemon-p6m5g\" (UID: \"2412fa08-e643-4225-b494-eb999ea93fce\") " pod="openshift-multus/network-metrics-daemon-p6m5g" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.402507 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2412fa08-e643-4225-b494-eb999ea93fce-metrics-certs\") pod \"network-metrics-daemon-p6m5g\" (UID: \"2412fa08-e643-4225-b494-eb999ea93fce\") " pod="openshift-multus/network-metrics-daemon-p6m5g" Jan 29 12:07:23 crc kubenswrapper[4753]: E0129 12:07:23.403444 4753 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 29 12:07:23 crc kubenswrapper[4753]: E0129 12:07:23.403606 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2412fa08-e643-4225-b494-eb999ea93fce-metrics-certs podName:2412fa08-e643-4225-b494-eb999ea93fce nodeName:}" failed. No retries permitted until 2026-01-29 12:07:23.903551785 +0000 UTC m=+58.155633230 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/2412fa08-e643-4225-b494-eb999ea93fce-metrics-certs") pod "network-metrics-daemon-p6m5g" (UID: "2412fa08-e643-4225-b494-eb999ea93fce") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.413191 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966eb8e59f82bacf20ebfa1b1d5bf9dc0a9cdb3b430ae83ed9bde106eb3fa521\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea7c92ec02a9cf6ec5fbd8efe22a066dfef9b48f042137964c5dd49578c99079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-29T12:07:23Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.435566 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zjjxc\" (UniqueName: \"kubernetes.io/projected/2412fa08-e643-4225-b494-eb999ea93fce-kube-api-access-zjjxc\") pod \"network-metrics-daemon-p6m5g\" (UID: \"2412fa08-e643-4225-b494-eb999ea93fce\") " pod="openshift-multus/network-metrics-daemon-p6m5g" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.436511 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0310995-a7c7-47c3-ae6c-05daaaba92a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e56ac830346c0f85227bcebc9b76c2af8646767d4678934985d7b1f5846dcb82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8391a68625fc3d8fdbc4f16b0b7b25e359798d08ce65ad8f482722910873418a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\
"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7c24x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:23Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.461208 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rnbz9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b372210b-6e1b-4a80-b379-7c1d570712f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://346c36936603f4535d5ad3e6b60e451b05b4d376e2531c813f0c900c92caf5fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnl
y\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqzp6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rnbz9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:23Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.466571 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.469593 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.470068 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.470266 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.470437 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:23Z","lastTransitionTime":"2026-01-29T12:07:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.487831 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9dtck" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"132de771-c8b4-403f-8916-8b453e7c6fc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gvjpd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gvjpd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-9dtck\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: 
current time 2026-01-29T12:07:23Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.510363 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b108c546-a8c1-4bf9-814b-bfb5871b3013\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9842d9d6fed8c87bc20835964d3271b0be3117fedc658e6ef028db422d1ad478\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4fd4169036d3a646c9e5e4bfa8d86a92c557c3d8f77608ae5308dd0b8c45517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b4a6bbb431e321e54013c96e3cca1537283cbdf65d89a978780550d603663fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"reso
urce-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://31bd7db7bddd3e16cb455f7af9fd3a9bca49919c8f1ffb676dec83c543ba7763\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:23Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.539541 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:23Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.561397 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d7ccf10988fd83d31d42be6f82403067a0cc50313c4d5dcddfb528507efffa4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:23Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.575732 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.575789 4753 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.575806 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.575863 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.575890 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:23Z","lastTransitionTime":"2026-01-29T12:07:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.586600 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80bec2ab-0a88-4818-9339-760edda3b07e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f15016e29f1fc14f51b4d01e772071333fd8a885cacc1804b3a03dbcfc93836\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e6a7d8df812546f17b3e026005c5062086e97e3fa364b3fa22cefec025e4eba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f71859413676dab4a435d43c11ad7cba315a12de744c3c8b161273f6ad36580c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0ec2a21ec299b5eed363668f39a270a8b03ea79b9de3dd23bda0c30579c3de3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91c1df8b6763320498420463a3841f0bfa4fcf753c355f549c3c1e93bf5206a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://05d1e99596530f467ecdae51aec8db191ac045cd85e121fc9835e2f3688f6ae7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34d19c3c88e356688bc3f0dbe54c1a46ef5e5f5f
edbf84c5cb536b3c272decb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e40e5b13dfdc135ea09253d466bacdfc5c2ac3d28eabc76d833d71ac300905c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-nzkvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:23Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.609283 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"703a6b9d-a15d-4ed3-b00b-db7bd5d42c61\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cab3ab80a8fddbc591cd07fdbed800f525bb8a1b5505bf818ebb59cbcce73003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7af8be0836a8ff7d08ff48d9487acdedd4b83127538d071d13b3215400c0f8de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d57a9cede640bdcec325175598b72dd00398436b730248728c7d9afd545b1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e86597c91848f265cd70410d57ac60eadba1d0322b9b897097366a41c93d8134\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T12:06:54Z\\\",\\\"message\\\":\\\"ed a new cert/key pair\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\"\\\\nI0129 12:06:54.190978 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 12:06:54.198044 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 12:06:54.198078 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 12:06:54.198139 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 12:06:54.198147 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 12:06:54.208089 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 12:06:54.208122 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208128 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208132 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 12:06:54.208135 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 12:06:54.208139 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 12:06:54.208143 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 12:06:54.208164 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0129 12:06:54.212380 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769688397\\\\\\\\\\\\\\\" (2026-01-29 12:06:37 +0000 UTC to 2026-02-28 12:06:38 +0000 UTC (now=2026-01-29 12:06:54.212317535 +0000 UTC))\\\\\\\"\\\\nF0129 12:06:54.212498 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e02d977e34888952623f4ced5e073d7bf1ff360f8deb435dc577d2ecf146cb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:33Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:23Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.628554 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:23Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.652822 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x8kzr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d753320a-61c2-4c0e-bd48-96d74b352114\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c7a667b7243fc36ff18cb4c017152fff3d5f4eb5e28e306ace44420cd722c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tgns4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x8kzr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:23Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.675486 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vwcjk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOn
ly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8b1c7640bd4881ba79f3471e309b2d1538aea8d0c8e01d3d7cfc3677d95725a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8b1c7640bd4881ba79f3471e309b2d1538aea8d0c8e01d3d7cfc3677d95725a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0ba51b6e2180da460faa3599850175db9d7410880ba0214425fa275803f36d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0ba51b6e2180da460faa3599850175db9d7410880ba0214425fa275803f36d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ed771b481178d6e12c669e122932ef25e1fe09db24c8f025f50a469400af184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ed
771b481178d6e12c669e122932ef25e1fe09db24c8f025f50a469400af184\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1990423fefacfb9b390af88b9af343c1bb4c02014db6bf8226918a7be9f4ad5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1990423fefacfb9b390af88b9af343c1bb4c02014db6bf8226918a7be9f4ad5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vwcjk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:23Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.678847 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.678937 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.678949 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.678973 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.678993 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:23Z","lastTransitionTime":"2026-01-29T12:07:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.695103 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0310995-a7c7-47c3-ae6c-05daaaba92a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e56ac830346c0f85227bcebc9b76c2af8646767d4678934985d7b1f5846dcb82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8391a68625fc3d8fdbc4f16b0b7b25e359798d08ce65ad8f482722910873418a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7c24x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:23Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.714254 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rnbz9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b372210b-6e1b-4a80-b379-7c1d570712f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://346c36936603f4535d5ad3e6b60e451b05b4d376e2531c813f0c900c92caf5fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqzp6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\
\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rnbz9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:23Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.729111 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9dtck" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"132de771-c8b4-403f-8916-8b453e7c6fc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30aac9e31bfe76eba96a50a30b03419b0b23ddd888485d609f37b84af28b8d3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gvjpd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://829af7b1f520503a0f36c60dd12b25d45c3831ba0cbb61f4f0a80582cb46da1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"
},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gvjpd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-9dtck\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:23Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.746626 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b108c546-a8c1-4bf9-814b-bfb5871b3013\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9842d9d6fed8c87bc20835964d3271b0be3117fedc658e6ef028db422d1ad478\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4fd4169036d3a646c9e5e4bfa8d86a92c557c3d8f77608ae5308dd0b8c45517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\"
:\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b4a6bbb431e321e54013c96e3cca1537283cbdf65d89a978780550d603663fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://31bd7db7bddd3e16cb455f7af9fd3a9bca49919c8f1ffb676dec83c543ba7763\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:23Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.765324 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:23Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.782294 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.782347 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.782357 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.782382 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.782391 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:23Z","lastTransitionTime":"2026-01-29T12:07:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.784362 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:23Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.805649 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966eb8e59f82bacf20ebfa1b1d5bf9dc0a9cdb3b430ae83ed9bde106eb3fa521\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea7c92ec02a9cf6ec5fbd8efe22a066dfef9b48f042137964c5dd49578c99079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:23Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.837352 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d7ccf10988fd83d31d42be6f82403067a0cc50313c4d5dcddfb528507efffa4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:23Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.878689 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80bec2ab-0a88-4818-9339-760edda3b07e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f15016e29f1fc14f51b4d01e772071333fd8a885cacc1804b3a03dbcfc93836\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e6a7d8df812546f17b3e026005c5062086e97e3fa364b3fa22cefec025e4eba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f71859413676dab4a435d43c11ad7cba315a12de744c3c8b161273f6ad36580c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0ec2a21ec299b5eed363668f39a270a8b03ea79b9de3dd23bda0c30579c3de3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91c1df8b6763320498420463a3841f0bfa4fcf753c355f549c3c1e93bf5206a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://05d1e99596530f467ecdae51aec8db191ac045cd85e121fc9835e2f3688f6ae7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34d19c3c88e356688bc3f0dbe54c1a46ef5e5f5f
edbf84c5cb536b3c272decb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e40e5b13dfdc135ea09253d466bacdfc5c2ac3d28eabc76d833d71ac300905c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-nzkvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:23Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.885576 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.885645 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.885674 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.885708 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.885768 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:23Z","lastTransitionTime":"2026-01-29T12:07:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.885576 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.885645 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.885674 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.885708 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.885768 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:23Z","lastTransitionTime":"2026-01-29T12:07:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.946403 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7962d88e183df5c4d91f5767f2007068ea2fdfd5c31d9d6ed609c2ab9ca26999\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:23Z is after 2025-08-24T17:21:41Z"
Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.948000 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2412fa08-e643-4225-b494-eb999ea93fce-metrics-certs\") pod \"network-metrics-daemon-p6m5g\" (UID: \"2412fa08-e643-4225-b494-eb999ea93fce\") " pod="openshift-multus/network-metrics-daemon-p6m5g"
Jan 29 12:07:23 crc kubenswrapper[4753]: E0129 12:07:23.948181 4753 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
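
The failed secret lookup above is gated by the nestedpendingoperations entry that follows: after each failed MountVolume.SetUp, the kubelet refuses to retry until a backoff deadline passes, and the log prints that deadline together with the current durationBeforeRetry. Below is a sketch of that general gating pattern, assuming an illustrative 1s initial delay that doubles up to a cap; the constants are assumptions, not kubelet's exact tuning.

// Sketch: exponential backoff gate behind "No retries permitted until ...
// (durationBeforeRetry ...)". Each failure pushes the next attempt out by a
// growing delay.
package main

import (
	"fmt"
	"time"
)

type retryGate struct {
	delay    time.Duration // current durationBeforeRetry
	notUntil time.Time     // earliest time the next attempt may run
}

func (g *retryGate) fail(now time.Time) {
	const initial = 1 * time.Second  // assumed starting delay
	const maxDelay = 2 * time.Minute // assumed cap
	switch {
	case g.delay == 0:
		g.delay = initial
	case g.delay*2 <= maxDelay:
		g.delay *= 2
	default:
		g.delay = maxDelay
	}
	g.notUntil = now.Add(g.delay)
}

func main() {
	g := &retryGate{}
	now := time.Date(2026, 1, 29, 12, 7, 23, 0, time.UTC) // clock from the log
	for attempt := 1; attempt <= 4; attempt++ {
		g.fail(now)
		fmt.Printf("attempt %d failed; no retries permitted until %s (durationBeforeRetry %s)\n",
			attempt, g.notUntil.Format(time.RFC3339Nano), g.delay)
		now = g.notUntil // assume the next attempt runs exactly when the gate opens
	}
}
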
Jan 29 12:07:23 crc kubenswrapper[4753]: E0129 12:07:23.948275 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2412fa08-e643-4225-b494-eb999ea93fce-metrics-certs podName:2412fa08-e643-4225-b494-eb999ea93fce nodeName:}" failed. No retries permitted until 2026-01-29 12:07:24.948247616 +0000 UTC m=+59.200329071 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/2412fa08-e643-4225-b494-eb999ea93fce-metrics-certs") pod "network-metrics-daemon-p6m5g" (UID: "2412fa08-e643-4225-b494-eb999ea93fce") : object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.966429 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5m2jf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0203345d-1e9f-4cfe-bde7-90f87221d1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fdf3d79084d3e17b1f8cb0b285fd425280f34724c6a441104a2ec52b5217bfdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zzwn6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5m2jf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:23Z is after 2025-08-24T17:21:41Z"
Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.983832 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-p6m5g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2412fa08-e643-4225-b494-eb999ea93fce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjjxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjjxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:23Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-p6m5g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:23Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.989331 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.989383 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.989398 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.989419 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:23 crc kubenswrapper[4753]: I0129 12:07:23.989875 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:23Z","lastTransitionTime":"2026-01-29T12:07:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.265213 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-19 23:00:54.756154971 +0000 UTC Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.269845 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.269902 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.269913 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.269934 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.269947 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:24Z","lastTransitionTime":"2026-01-29T12:07:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.269845 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.269902 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.269913 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.269934 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.269947 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:24Z","lastTransitionTime":"2026-01-29T12:07:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.277086 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vwcjk" event={"ID":"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1","Type":"ContainerStarted","Data":"e921d361518102483679b5ff7387e27858e315d637efb0f18e06215fe00f70a8"}
Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.298247 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d7ccf10988fd83d31d42be6f82403067a0cc50313c4d5dcddfb528507efffa4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:24Z is after 2025-08-24T17:21:41Z"
Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.319947 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"80bec2ab-0a88-4818-9339-760edda3b07e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f15016e29f1fc14f51b4d01e772071333fd8a885cacc1804b3a03dbcfc93836\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e6a7d8df812546f17b3e026005c5062086e97e3fa364b3fa22cefec025e4eba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f71859413676dab4a435d43c11ad7cba315a12de744c3c8b161273f6ad36580c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0ec2a21ec299b5eed363668f39a270a8b03ea79b9de3dd23bda0c30579c3de3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91c1df8b6763320498420463a3841f0bfa4fcf753c355f549c3c1e93bf5206a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://05d1e99596530f467ecdae51aec8db191ac045cd85e121fc9835e2f3688f6ae7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34d19c3c88e356688bc3f0dbe54c1a46ef5e5f5fedbf84c5cb536b3c272decb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"D
isabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e40e5b13dfdc135ea09253d466bacdfc5c2ac3d28eabc76d833d71ac300905c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-nzkvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:24Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.337388 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7962d88e183df5c4d91f5767f2007068ea2fdfd5c31d9d6ed609c2ab9ca26999\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:24Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.354303 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5m2jf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0203345d-1e9f-4cfe-bde7-90f87221d1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fdf3d79084d3e17b1f8cb0b285fd425280f34724c6a441104a2ec52b5217bfdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zzwn6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5m2jf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:24Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.369353 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-p6m5g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2412fa08-e643-4225-b494-eb999ea93fce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjjxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjjxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:23Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-p6m5g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:24Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.372777 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.372817 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.372831 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.372850 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.372861 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:24Z","lastTransitionTime":"2026-01-29T12:07:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.389250 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"703a6b9d-a15d-4ed3-b00b-db7bd5d42c61\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cab3ab80a8fddbc591cd07fdbed800f525bb8a1b5505bf818ebb59cbcce73003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7af8be0836a8ff7d08ff48d9487acdedd4b83127538d071d13b3215400c0f8de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d57a9cede640bdcec325175598b72dd00398436b730248728c7d9afd545b1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e86597c91848f265cd70410d57ac60eadba1d0322b9b897097366a41c93d8134\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T12:06:54Z\\\",\\\"message\\\":\\\"ed a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\"\\\\nI0129 12:06:54.190978 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 12:06:54.198044 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 12:06:54.198078 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 12:06:54.198139 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 12:06:54.198147 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 12:06:54.208089 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 12:06:54.208122 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208128 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208132 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 12:06:54.208135 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 12:06:54.208139 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 12:06:54.208143 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 12:06:54.208164 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0129 12:06:54.212380 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769688397\\\\\\\\\\\\\\\" (2026-01-29 12:06:37 +0000 UTC to 2026-02-28 12:06:38 +0000 UTC (now=2026-01-29 12:06:54.212317535 +0000 UTC))\\\\\\\"\\\\nF0129 12:06:54.212498 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e02d977e34888952623f4ced5e073d7bf1ff360f8deb435dc577d2ecf146cb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:33Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:24Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.405508 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:24Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.419793 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x8kzr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d753320a-61c2-4c0e-bd48-96d74b352114\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c7a667b7243fc36ff18cb4c017152fff3d5f4eb5e28e306ace44420cd722c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tgns4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x8kzr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:24Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.439698 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vwcjk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e921d361518102483679b5ff7387e27858e315d637efb0f18e06215fe00f70a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8b1c7640bd4881ba79f3471e309b2d1538aea8d0c8e01d3d7cfc3677d95725a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8b1c7640bd4881ba79f3471e309b2d1538aea8d0c8e01d3d7cfc3677d95725a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0ba51b6e2180da460faa3599850175db9d7410880ba0214425fa275803f36d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0ba51b6e2180da460faa3599850175db9d7410880ba0214425fa275803f36d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ed771b481178d6e12c669e122932ef25e1fe09db24c8f025f50a469400af184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ed771b481178d6e12c669e122932ef25e1fe09db24c8f025f50a469400af184\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1990423fefacfb9b390af88b9af343c1bb4c02014db6bf8226918a7be9f4ad5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1990423fefacfb9b390af88b9af343c1bb4c02014db6bf8226918a7be9f4ad5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vwcjk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:24Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.456632 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0310995-a7c7-47c3-ae6c-05daaaba92a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e56ac830346c0f85227bcebc9b76c2af8646767d4678934985d7b1f5846dcb82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8391a68625fc3d8fdbc4f16b0b7b25e359798d08ce65ad8f482722910873418a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7c24x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:24Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.474395 4753 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-rnbz9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b372210b-6e1b-4a80-b379-7c1d570712f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://346c36936603f4535d5ad3e6b60e451b05b4d376e2531c813f0c900c92caf5fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqzp6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-rnbz9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:24Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.475975 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.476045 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.476058 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.476080 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.476096 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:24Z","lastTransitionTime":"2026-01-29T12:07:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.487443 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9dtck" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"132de771-c8b4-403f-8916-8b453e7c6fc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30aac9e31bfe76eba96a50a30b03419b0b23ddd888485d609f37b84af28b8d3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kuberne
tes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gvjpd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://829af7b1f520503a0f36c60dd12b25d45c3831ba0cbb61f4f0a80582cb46da1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gvjpd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-9dtck\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:24Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.501451 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b108c546-a8c1-4bf9-814b-bfb5871b3013\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9842d9d6fed8c87bc20835964d3271b0be3117fedc658e6ef028db422d1ad478\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4fd4169036d3a646c9e5e4bfa8d86a92c557c3d8f77608ae5308dd0b8c45517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b4a6bbb431e321e54013c96e3cca1537283cbdf65d89a978780550d603663fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://31bd7db7bddd3e16cb455f7af9fd3a9bca49919c8f1ffb676dec83c543ba7763\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:24Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.514935 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:24Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.529748 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:24Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.547199 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966eb8e59f82bacf20ebfa1b1d5bf9dc0a9cdb3b430ae83ed9bde106eb3fa521\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea7c92ec02a9cf6ec5fbd8efe22a066dfef9b48f042137964c5dd49578c99079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:24Z is after 2025-08-24T17:21:41Z"
Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.578526 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.578600 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.578615 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.578641 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.578656 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:24Z","lastTransitionTime":"2026-01-29T12:07:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.681118 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.681184 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.681195 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.681213 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.681243 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:24Z","lastTransitionTime":"2026-01-29T12:07:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.784997 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.785077 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.785096 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.785140 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.785158 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:24Z","lastTransitionTime":"2026-01-29T12:07:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.888762 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.888847 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.888859 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.888876 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.888892 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:24Z","lastTransitionTime":"2026-01-29T12:07:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.925263 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 29 12:07:24 crc kubenswrapper[4753]: E0129 12:07:24.925861 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.926275 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p6m5g"
Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.926332 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 29 12:07:24 crc kubenswrapper[4753]: E0129 12:07:24.926396 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p6m5g" podUID="2412fa08-e643-4225-b494-eb999ea93fce"
Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.926430 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 29 12:07:24 crc kubenswrapper[4753]: E0129 12:07:24.926469 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 29 12:07:24 crc kubenswrapper[4753]: E0129 12:07:24.926517 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.974414 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2412fa08-e643-4225-b494-eb999ea93fce-metrics-certs\") pod \"network-metrics-daemon-p6m5g\" (UID: \"2412fa08-e643-4225-b494-eb999ea93fce\") " pod="openshift-multus/network-metrics-daemon-p6m5g"
Jan 29 12:07:24 crc kubenswrapper[4753]: E0129 12:07:24.974697 4753 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 29 12:07:24 crc kubenswrapper[4753]: E0129 12:07:24.974802 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2412fa08-e643-4225-b494-eb999ea93fce-metrics-certs podName:2412fa08-e643-4225-b494-eb999ea93fce nodeName:}" failed. No retries permitted until 2026-01-29 12:07:26.974776341 +0000 UTC m=+61.226857796 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/2412fa08-e643-4225-b494-eb999ea93fce-metrics-certs") pod "network-metrics-daemon-p6m5g" (UID: "2412fa08-e643-4225-b494-eb999ea93fce") : object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.993046 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.993115 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.993127 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.993144 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:24 crc kubenswrapper[4753]: I0129 12:07:24.993163 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:24Z","lastTransitionTime":"2026-01-29T12:07:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.096466 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.096538 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.096551 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.096572 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.096590 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:25Z","lastTransitionTime":"2026-01-29T12:07:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.200072 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.200120 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.200131 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.200152 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.200165 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:25Z","lastTransitionTime":"2026-01-29T12:07:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.265371 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-14 08:12:17.276185943 +0000 UTC
Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.285118 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-nzkvz_80bec2ab-0a88-4818-9339-760edda3b07e/ovnkube-controller/0.log"
Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.289432 4753 generic.go:334] "Generic (PLEG): container finished" podID="80bec2ab-0a88-4818-9339-760edda3b07e" containerID="34d19c3c88e356688bc3f0dbe54c1a46ef5e5f5fedbf84c5cb536b3c272decb0" exitCode=1
Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.289488 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" event={"ID":"80bec2ab-0a88-4818-9339-760edda3b07e","Type":"ContainerDied","Data":"34d19c3c88e356688bc3f0dbe54c1a46ef5e5f5fedbf84c5cb536b3c272decb0"}
Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.290548 4753 scope.go:117] "RemoveContainer" containerID="34d19c3c88e356688bc3f0dbe54c1a46ef5e5f5fedbf84c5cb536b3c272decb0"
Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.305824 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.305873 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.305887 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.305929 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.305949 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:25Z","lastTransitionTime":"2026-01-29T12:07:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI 
configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.307898 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7962d88e183df5c4d91f5767f2007068ea2fdfd5c31d9d6ed609c2ab9ca26999\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:25Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.325855 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5m2jf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0203345d-1e9f-4cfe-bde7-90f87221d1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fdf3d79084d3e17b1f8cb0b285fd425280f34724c6a441104a2ec52b5217bfdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zzwn6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5m2jf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:25Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.343799 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-p6m5g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2412fa08-e643-4225-b494-eb999ea93fce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjjxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjjxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:23Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-p6m5g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:25Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.364109 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"703a6b9d-a15d-4ed3-b00b-db7bd5d42c61\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cab3ab80a8fddbc591cd07fdbed800f525bb8a1b5505bf818ebb59cbcce73003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7af8be0836a8ff7d08ff48d9487acdedd4b83127538d071d13b3215400c0f8de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d57a9cede640bdcec325175598b72dd00398436b730248728c7d9afd545b1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e86597c91848f265cd70410d57ac60eadba1d0322b9b897097366a41c93d8134\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T12:06:54Z\\\",\\\"message\\\":\\\"ed a new cert/key pair\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\"\\\\nI0129 12:06:54.190978 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 12:06:54.198044 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 12:06:54.198078 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 12:06:54.198139 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 12:06:54.198147 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 12:06:54.208089 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 12:06:54.208122 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208128 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208132 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 12:06:54.208135 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 12:06:54.208139 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 12:06:54.208143 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 12:06:54.208164 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0129 12:06:54.212380 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769688397\\\\\\\\\\\\\\\" (2026-01-29 12:06:37 +0000 UTC to 2026-02-28 12:06:38 +0000 UTC (now=2026-01-29 12:06:54.212317535 +0000 UTC))\\\\\\\"\\\\nF0129 12:06:54.212498 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e02d977e34888952623f4ced5e073d7bf1ff360f8deb435dc577d2ecf146cb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:33Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:25Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.383740 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:25Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.400322 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x8kzr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d753320a-61c2-4c0e-bd48-96d74b352114\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c7a667b7243fc36ff18cb4c017152fff3d5f4eb5e28e306ace44420cd722c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tgns4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x8kzr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:25Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.410413 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.410474 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.410486 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.410507 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.410520 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:25Z","lastTransitionTime":"2026-01-29T12:07:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.421448 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vwcjk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e921d361518102483679b5ff7387e27858e315d637efb0f18e06215fe00f70a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\
\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8b1c7640bd4881ba79f3471e309b2d1538aea8d0c8e01d3d7cfc3677d95725a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8b1c7640bd4881ba79f3471e309b2d1538aea8d0c8e01d3d7cfc3677d95725a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0ba51b6e2180da460faa3599850175db9d7410880ba0214425fa275803f36d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0ba51b6e2180da460faa3599850175db9d7410880ba0214425fa275803f36d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"starte
dAt\\\":\\\"2026-01-29T12:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ed771b481178d6e12c669e122932ef25e1fe09db24c8f025f50a469400af184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ed771b481178d6e12c669e122932ef25e1fe09db24c8f025f50a469400af184\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1990423fefacfb9b390af88b9af343c1bb4c02014db6bf8226918a7be9f4ad5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1990423fefacfb9b390af88b9af343c1bb4c02014db6bf8226918a7be9f4ad5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vwcjk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:25Z is after 
2025-08-24T17:21:41Z" Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.447499 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b108c546-a8c1-4bf9-814b-bfb5871b3013\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9842d9d6fed8c87bc20835964d3271b0be3117fedc658e6ef028db422d1ad478\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4fd4169036d3a646c9e5e4bfa8d86a92c557c3d8f77608ae5308dd0b8c45517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b4a6bbb431e321e54013c96e3cca1537283cbdf65d89a978780550d603663fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/k
ubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://31bd7db7bddd3e16cb455f7af9fd3a9bca49919c8f1ffb676dec83c543ba7763\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:25Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.466021 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:25Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.482414 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:25Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.492163 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.499376 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966eb8e59f82bacf20ebfa1b1d5bf9dc0a9cdb3b430ae83ed9bde106eb3fa521\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea7c92ec02a9cf6ec5fbd8efe22a066dfef9b48f042137964c5dd49578c99079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01
-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:25Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.503156 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.514131 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.514183 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.514196 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.514237 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.514255 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:25Z","lastTransitionTime":"2026-01-29T12:07:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.516829 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0310995-a7c7-47c3-ae6c-05daaaba92a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e56ac830346c0f85227bcebc9b76c2af8646767d4678934985d7b1f5846dcb82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8391a68625fc3d8fdbc4f16b0b7b25e359798d08ce65ad8f482722910873418a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7c24x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:25Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.532368 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rnbz9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b372210b-6e1b-4a80-b379-7c1d570712f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://346c36936603f4535d5ad3e6b60e451b05b4d376e2531c813f0c900c92caf5fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqzp6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\
\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rnbz9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:25Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.546315 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9dtck" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"132de771-c8b4-403f-8916-8b453e7c6fc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30aac9e31bfe76eba96a50a30b03419b0b23ddd888485d609f37b84af28b8d3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gvjpd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://829af7b1f520503a0f36c60dd12b25d45c3831ba0cbb61f4f0a80582cb46da1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"
},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gvjpd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-9dtck\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:25Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.567492 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80bec2ab-0a88-4818-9339-760edda3b07e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f15016e29f1fc14f51b4d01e772071333fd8a885cacc1804b3a03dbcfc93836\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e6a7d8df812546f17b3e026005c5062086e97e3fa364b3fa22cefec025e4eba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f71859413676dab4a435d43c11ad7cba315a12de744c3c8b161273f6ad36580c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0ec2a21ec299b5eed363668f39a270a8b03ea79b9de3dd23bda0c30579c3de3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91c1df8b6763320498420463a3841f0bfa4fcf753c355f549c3c1e93bf5206a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://05d1e99596530f467ecdae51aec8db191ac045cd85e121fc9835e2f3688f6ae7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34d19c3c88e356688bc3f0dbe54c1a46ef5e5f5f
edbf84c5cb536b3c272decb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34d19c3c88e356688bc3f0dbe54c1a46ef5e5f5fedbf84c5cb536b3c272decb0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T12:07:25Z\\\",\\\"message\\\":\\\"sip/v1/apis/informers/externalversions/factory.go:140\\\\nI0129 12:07:25.237371 5906 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0129 12:07:25.237422 5906 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0129 12:07:25.237429 5906 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0129 12:07:25.237510 5906 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0129 12:07:25.237512 5906 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0129 12:07:25.237516 5906 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0129 12:07:25.237526 5906 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0129 12:07:25.237544 5906 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0129 12:07:25.237581 5906 handler.go:208] Removed *v1.Node event handler 2\\\\nI0129 12:07:25.237660 5906 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0129 12:07:25.237696 5906 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0129 12:07:25.237656 5906 handler.go:208] Removed *v1.Node event handler 7\\\\nI0129 12:07:25.237757 5906 factory.go:656] Stopping watch factory\\\\nI0129 12:07:25.237782 5906 ovnkube.go:599] Stopped ovnkube\\\\nI0129 12:07:25.237721 5906 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0129 
12:07:2\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e40e5b13dfdc135ea09253d466bacdfc5c2ac3d28eabc76d833d71ac300905c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0
d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-nzkvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:25Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.583790 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d7ccf10988fd83d31d42be6f82403067a0cc50313c4d5dcddfb528507efffa4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:25Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.599058 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c93f64b-e5af-4d6b-83e9-7fdfeb8548ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a9ec3e416b3b385c71b2fa28c0215fcb04b708e33fec2d85fdf0a75b848b027\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4fd3a920ea0c98edb2c8ad37d2a1a5d1ed50de5bc8c3e2f4d34c876b2cab54f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8dea62586ba5ac5a8ddc9e75b216a27593c8fe99516abc38c9c411a93842a372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:32Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://44c03bba5a02d879c3b235cd65897fb30651871689c8f02b9526966817f79636\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44c03bba5a02d879c3b235cd65897fb30651871689c8f02b9526966817f79636\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:25Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.613465 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7962d88e183df5c4d91f5767f2007068ea2fdfd5c31d9d6ed609c2ab9ca26999\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:25Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.616654 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.616689 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.616698 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.616715 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.616728 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:25Z","lastTransitionTime":"2026-01-29T12:07:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.626720 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5m2jf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0203345d-1e9f-4cfe-bde7-90f87221d1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fdf3d79084d3e17b1f8cb0b285fd425280f34724c6a441104a2ec52b5217bfdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zzwn6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5m2jf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:25Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.639550 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-p6m5g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2412fa08-e643-4225-b494-eb999ea93fce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjjxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjjxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:23Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-p6m5g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:25Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.654199 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"703a6b9d-a15d-4ed3-b00b-db7bd5d42c61\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cab3ab80a8fddbc591cd07fdbed800f525bb8a1b5505bf818ebb59cbcce73003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7af8be0836a8ff7d08ff48d9487acdedd4b83127538d071d13b3215400c0f8de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d57a9cede640bdcec325175598b72dd00398436b730248728c7d9afd545b1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e86597c91848f265cd70410d57ac60eadba1d0322b9b897097366a41c93d8134\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T12:06:54Z\\\",\\\"message\\\":\\\"ed a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\"\\\\nI0129 12:06:54.190978 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 12:06:54.198044 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 12:06:54.198078 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 12:06:54.198139 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 12:06:54.198147 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 12:06:54.208089 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 12:06:54.208122 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208128 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208132 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 12:06:54.208135 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 12:06:54.208139 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 12:06:54.208143 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 12:06:54.208164 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0129 12:06:54.212380 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769688397\\\\\\\\\\\\\\\" (2026-01-29 12:06:37 +0000 UTC to 2026-02-28 12:06:38 +0000 UTC (now=2026-01-29 12:06:54.212317535 +0000 UTC))\\\\\\\"\\\\nF0129 12:06:54.212498 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e02d977e34888952623f4ced5e073d7bf1ff360f8deb435dc577d2ecf146cb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:33Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:25Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.666915 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:25Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.677252 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x8kzr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d753320a-61c2-4c0e-bd48-96d74b352114\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c7a667b7243fc36ff18cb4c017152fff3d5f4eb5e28e306ace44420cd722c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tgns4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x8kzr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:25Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.693024 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vwcjk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e921d361518102483679b5ff7387e27858e315d637efb0f18e06215fe00f70a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8b1c7640bd4881ba79f3471e309b2d1538aea8d0c8e01d3d7cfc3677d95725a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8b1c7640bd4881ba79f3471e309b2d1538aea8d0c8e01d3d7cfc3677d95725a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0ba51b6e2180da460faa3599850175db9d7410880ba0214425fa275803f36d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0ba51b6e2180da460faa3599850175db9d7410880ba0214425fa275803f36d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ed771b481178d6e12c669e122932ef25e1fe09db24c8f025f50a469400af184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ed771b481178d6e12c669e122932ef25e1fe09db24c8f025f50a469400af184\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1990423fefacfb9b390af88b9af343c1bb4c02014db6bf8226918a7be9f4ad5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1990423fefacfb9b390af88b9af343c1bb4c02014db6bf8226918a7be9f4ad5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vwcjk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:25Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.709777 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rnbz9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b372210b-6e1b-4a80-b379-7c1d570712f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://346c36936603f4535d5ad3e6b60e451b05b4d376e2531c813f0c900c92caf5fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqzp6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rnbz9\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:25Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.720441 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.720491 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.720502 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.720521 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.720533 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:25Z","lastTransitionTime":"2026-01-29T12:07:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.728029 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9dtck" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"132de771-c8b4-403f-8916-8b453e7c6fc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30aac9e31bfe76eba96a50a30b03419b0b23ddd888485d609f37b84af28b8d3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gvjpd\\\",\\\"readOnly\\\":t
rue,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://829af7b1f520503a0f36c60dd12b25d45c3831ba0cbb61f4f0a80582cb46da1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gvjpd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-9dtck\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:25Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.748041 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b108c546-a8c1-4bf9-814b-bfb5871b3013\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9842d9d6fed8c87bc20835964d3271b0be3117fedc658e6ef028db422d1ad478\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4fd4169036d3a646c9e5e4bfa8d86a92c557c3d8f77608ae5308dd0b8c45517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b4a6bbb431e321e54013c96e3cca1537283cbdf65d89a978780550d603663fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://31bd7db7bddd3e16cb455f7af9fd3a9bca49919c8f1ffb676dec83c543ba7763\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:25Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.769435 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:25Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.787026 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:25Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.804690 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966eb8e59f82bacf20ebfa1b1d5bf9dc0a9cdb3b430ae83ed9bde106eb3fa521\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea7c92ec02a9cf6ec5fbd8efe22a066dfef9b48f042137964c5dd49578c99079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:25Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.822983 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0310995-a7c7-47c3-ae6c-05daaaba92a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e56ac830346c0f85227bcebc9b76c2af8646767d4678934985d7b1f5846dcb82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8391a68625fc3d8fdbc4f16b0b7b25e359798d08ce65ad8f482722910873418a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernete
s.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7c24x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:25Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.823043 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.823083 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.823092 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.823118 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.823127 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:25Z","lastTransitionTime":"2026-01-29T12:07:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.836866 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d7ccf10988fd83d31d42be6f82403067a0cc50313c4d5dcddfb528507efffa4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:25Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.868969 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80bec2ab-0a88-4818-9339-760edda3b07e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f15016e29f1fc14f51b4d01e772071333fd8a885cacc1804b3a03dbcfc93836\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e6a7d8df812546f17b3e026005c5062086e97e3fa364b3fa22cefec025e4eba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f71859413676dab4a435d43c11ad7cba315a12de744c3c8b161273f6ad36580c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0ec2a21ec299b5eed363668f39a270a8b03ea79b9de3dd23bda0c30579c3de3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91c1df8b6763320498420463a3841f0bfa4fcf753c355f549c3c1e93bf5206a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://05d1e99596530f467ecdae51aec8db191ac045cd85e121fc9835e2f3688f6ae7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34d19c3c88e356688bc3f0dbe54c1a46ef5e5f5fedbf84c5cb536b3c272decb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34d19c3c88e356688bc3f0dbe54c1a46ef5e5f5fedbf84c5cb536b3c272decb0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T12:07:25Z\\\",\\\"message\\\":\\\"sip/v1/apis/informers/externalversions/factory.go:140\\\\nI0129 12:07:25.237371 5906 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0129 12:07:25.237422 5906 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0129 12:07:25.237429 5906 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0129 12:07:25.237510 5906 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0129 12:07:25.237512 5906 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0129 12:07:25.237516 5906 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0129 12:07:25.237526 5906 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0129 12:07:25.237544 5906 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0129 12:07:25.237581 5906 handler.go:208] Removed *v1.Node event handler 2\\\\nI0129 12:07:25.237660 5906 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0129 12:07:25.237696 5906 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0129 12:07:25.237656 5906 handler.go:208] Removed *v1.Node event handler 7\\\\nI0129 12:07:25.237757 5906 factory.go:656] Stopping watch factory\\\\nI0129 12:07:25.237782 5906 ovnkube.go:599] Stopped ovnkube\\\\nI0129 12:07:25.237721 5906 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0129 
12:07:2\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e40e5b13dfdc135ea09253d466bacdfc5c2ac3d28eabc76d833d71ac300905c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0
d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-nzkvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:25Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.925762 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.925810 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.925823 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.925848 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:25 crc kubenswrapper[4753]: I0129 12:07:25.925860 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:25Z","lastTransitionTime":"2026-01-29T12:07:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.029199 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.029288 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.029299 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.029321 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.029336 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:26Z","lastTransitionTime":"2026-01-29T12:07:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.132537 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.132597 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.132612 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.132639 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.132653 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:26Z","lastTransitionTime":"2026-01-29T12:07:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.235622 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.235666 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.235679 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.235698 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.235710 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:26Z","lastTransitionTime":"2026-01-29T12:07:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.265649 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-10 02:17:08.417790403 +0000 UTC Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.295061 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-nzkvz_80bec2ab-0a88-4818-9339-760edda3b07e/ovnkube-controller/0.log" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.298749 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" event={"ID":"80bec2ab-0a88-4818-9339-760edda3b07e","Type":"ContainerStarted","Data":"355e2c7c84bbb5dcfd75734a95868268a117bb2fad6db6fc7c23ab6afbf7fb58"} Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.299629 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.315850 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d7ccf10988fd83d31d42be6f82403067a0cc50313c4d5dcddfb528507efffa4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:26Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.338755 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:26 crc 
kubenswrapper[4753]: I0129 12:07:26.338841 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.338857 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.338876 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.338889 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:26Z","lastTransitionTime":"2026-01-29T12:07:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.339592 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80bec2ab-0a88-4818-9339-760edda3b07e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f15016e29f1fc14f51b4d01e772071333fd8a885cacc1804b3a03dbcfc93836\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e6a7d8df812546f17b3e026005c5062086e97e3fa364b3fa22cefec025e4eba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f71859413676dab4a435d43c11ad7cba315a12de744c3c8b161273f6ad36580c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0ec2a21ec299b5eed363668f39a270a8b03ea79b9de3dd23bda0c30579c3de3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91c1df8b6763320498420463a3841f0bfa4fcf753c355f549c3c1e93bf5206a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://05d1e99596530f467ecdae51aec8db191ac045cd85e121fc9835e2f3688f6ae7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://355e2c7c84bbb5dcfd75734a95868268a117bb2f
ad6db6fc7c23ab6afbf7fb58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34d19c3c88e356688bc3f0dbe54c1a46ef5e5f5fedbf84c5cb536b3c272decb0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T12:07:25Z\\\",\\\"message\\\":\\\"sip/v1/apis/informers/externalversions/factory.go:140\\\\nI0129 12:07:25.237371 5906 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0129 12:07:25.237422 5906 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0129 12:07:25.237429 5906 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0129 12:07:25.237510 5906 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0129 12:07:25.237512 5906 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0129 12:07:25.237516 5906 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0129 12:07:25.237526 5906 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0129 12:07:25.237544 5906 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0129 12:07:25.237581 5906 handler.go:208] Removed *v1.Node event handler 2\\\\nI0129 12:07:25.237660 5906 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0129 12:07:25.237696 5906 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0129 12:07:25.237656 5906 handler.go:208] Removed *v1.Node event handler 7\\\\nI0129 12:07:25.237757 5906 factory.go:656] Stopping watch factory\\\\nI0129 12:07:25.237782 5906 ovnkube.go:599] Stopped ovnkube\\\\nI0129 12:07:25.237721 5906 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0129 
12:07:2\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:14Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e40e5b13dfdc135ea09253d466bacdfc5c2ac3d28eabc76d833d71ac300905c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\
\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-nzkvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:26Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.356220 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-p6m5g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2412fa08-e643-4225-b494-eb999ea93fce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjjxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjjxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:23Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-p6m5g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:26Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.370641 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c93f64b-e5af-4d6b-83e9-7fdfeb8548ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a9ec3e416b3b385c71b2fa28c0215fcb04b708e33fec2d85fdf0a75b848b027\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4fd3a920ea0c98edb2c8ad37d2a1a5d1ed50de5bc8c3e2f4d34c876b2cab54f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8dea62586ba5ac5a8ddc9e75b216a27593c8fe99516abc38c9c411a93842a372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://44c03bba5a02d879c3b235cd65897fb30651871689c8f02b9526966817f79636\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44c03bba5a02d879c3b235cd65897fb30651871689c8f02b9526966817f79636\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:26Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.388283 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7962d88e183df5c4d91f5767f2007068ea2fdfd5c31d9d6ed609c2ab9ca26999\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:26Z is after 2025-08-24T17:21:41Z" Jan 29 
12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.401749 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5m2jf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0203345d-1e9f-4cfe-bde7-90f87221d1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fdf3d79084d3e17b1f8cb0b285fd425280f34724c6a441104a2ec52b5217bfdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zzwn6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5m2jf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:26Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.416174 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x8kzr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d753320a-61c2-4c0e-bd48-96d74b352114\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c7a667b7243fc36ff18cb4c017152fff3d5f4eb5e28e306ace44420cd722c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tgns4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x8kzr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:26Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.434413 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vwcjk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e921d361518102483679b5ff7387e27858e315d637efb0f18e06215fe00f70a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8b1c7640bd4881ba79f3471e309b2d1538aea8d0c8e01d3d7cfc3677d95725a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8b1c7640bd4881ba79f3471e309b2d1538aea8d0c8e01d3d7cfc3677d95725a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0ba51b6e2180da460faa3599850175db9d7410880ba0214425fa275803f36d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0ba51b6e2180da460faa3599850175db9d7410880ba0214425fa275803f36d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ed771b481178d6e12c669e122932ef25e1fe09db24c8f025f50a469400af184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ed771b481178d6e12c669e122932ef25e1fe09db24c8f025f50a469400af184\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1990423fefacfb9b390af88b9af343c1bb4c02014db6bf8226918a7be9f4ad5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1990423fefacfb9b390af88b9af343c1bb4c02014db6bf8226918a7be9f4ad5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vwcjk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:26Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.441824 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.441874 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:26 crc 
kubenswrapper[4753]: I0129 12:07:26.441888 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.441908 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.441922 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:26Z","lastTransitionTime":"2026-01-29T12:07:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.453559 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"703a6b9d-a15d-4ed3-b00b-db7bd5d42c61\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cab3ab80a8fddbc591cd07fdbed800f525bb8a1b5505bf818ebb59cbcce73003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7af8be0836a8ff7d08ff48d9487acdedd4b83127538d071d13b3215400c0f8de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d57a9cede640bdcec325175598b72dd00398436b730248728c7d9afd545b1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e86597c91848f265cd70410d57ac60eadba1d0322b9b897097366a41c93d8134\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T12:06:54Z\\\",\\\"message\\\":\\\"ed a new cert/key pair\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\"\\\\nI0129 12:06:54.190978 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 12:06:54.198044 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 12:06:54.198078 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 12:06:54.198139 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 12:06:54.198147 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 12:06:54.208089 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 12:06:54.208122 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208128 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208132 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 12:06:54.208135 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 12:06:54.208139 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 12:06:54.208143 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 12:06:54.208164 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0129 12:06:54.212380 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769688397\\\\\\\\\\\\\\\" (2026-01-29 12:06:37 +0000 UTC to 2026-02-28 12:06:38 +0000 UTC (now=2026-01-29 12:06:54.212317535 +0000 UTC))\\\\\\\"\\\\nF0129 12:06:54.212498 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e02d977e34888952623f4ced5e073d7bf1ff360f8deb435dc577d2ecf146cb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:33Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:26Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.470025 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:26Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.483707 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:26Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.500372 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966eb8e59f82bacf20ebfa1b1d5bf9dc0a9cdb3b430ae83ed9bde106eb3fa521\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea7c92ec02a9cf6ec5fbd8efe22a066dfef9b48f042137964c5dd49578c99079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:26Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.519438 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0310995-a7c7-47c3-ae6c-05daaaba92a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e56ac830346c0f85227bcebc9b76c2af8646767d4678934985d7b1f5846dcb82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8391a68625fc3d8fdbc4f16b0b7b25e359798d08ce65ad8f482722910873418a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernete
s.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7c24x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:26Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.536020 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rnbz9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b372210b-6e1b-4a80-b379-7c1d570712f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://346c36936603f4535d5ad3e6b60e451b05b4d376e2531c813f0c900c92caf5fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"
},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqzp6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rnbz9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:26Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.545800 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.545870 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.545889 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.545923 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.545962 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:26Z","lastTransitionTime":"2026-01-29T12:07:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.549946 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9dtck" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"132de771-c8b4-403f-8916-8b453e7c6fc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30aac9e31bfe76eba96a50a30b03419b0b23ddd888485d609f37b84af28b8d3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gvjpd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://829af7b1f520503a0f36c60dd12b25d45c3831ba0cbb61f4f0a80582cb46da1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gvjpd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-9dtck\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:26Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.565044 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b108c546-a8c1-4bf9-814b-bfb5871b3013\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9842d9d6fed8c87bc20835964d3271b0be3117fedc658e6ef028db422d1ad478\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4fd4169036d3a646c9e5e4bfa8d86a92c557c3d8f77608ae5308dd0b8c45517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b4a6bbb431e321e54013c96e3cca1537283cbdf65d89a978780550d603663fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://31bd7db7bddd3e16cb455f7af9fd3a9bca49919c8f1ffb676dec83c543ba7763\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:26Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.579417 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:26Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.649492 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.649548 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.649562 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.649585 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.649598 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:26Z","lastTransitionTime":"2026-01-29T12:07:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.752747 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.752800 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.752811 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.752831 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.752844 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:26Z","lastTransitionTime":"2026-01-29T12:07:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.855286 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.855373 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.855387 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.855404 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.855414 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:26Z","lastTransitionTime":"2026-01-29T12:07:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.887782 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p6m5g" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.887868 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.887808 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 12:07:26 crc kubenswrapper[4753]: E0129 12:07:26.887958 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p6m5g" podUID="2412fa08-e643-4225-b494-eb999ea93fce" Jan 29 12:07:26 crc kubenswrapper[4753]: E0129 12:07:26.888102 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.888150 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 12:07:26 crc kubenswrapper[4753]: E0129 12:07:26.888196 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 12:07:26 crc kubenswrapper[4753]: E0129 12:07:26.888285 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.910263 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:07:26 crc kubenswrapper[4753]: E0129 12:07:26.910477 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:07:58.910446125 +0000 UTC m=+93.162527580 (durationBeforeRetry 32s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.958541 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.958591 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.958608 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.958629 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:26 crc kubenswrapper[4753]: I0129 12:07:26.958647 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:26Z","lastTransitionTime":"2026-01-29T12:07:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.011540 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2412fa08-e643-4225-b494-eb999ea93fce-metrics-certs\") pod \"network-metrics-daemon-p6m5g\" (UID: \"2412fa08-e643-4225-b494-eb999ea93fce\") " pod="openshift-multus/network-metrics-daemon-p6m5g" Jan 29 12:07:27 crc kubenswrapper[4753]: E0129 12:07:27.011740 4753 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 29 12:07:27 crc kubenswrapper[4753]: E0129 12:07:27.011824 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2412fa08-e643-4225-b494-eb999ea93fce-metrics-certs podName:2412fa08-e643-4225-b494-eb999ea93fce nodeName:}" failed. No retries permitted until 2026-01-29 12:07:31.01180238 +0000 UTC m=+65.263883835 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/2412fa08-e643-4225-b494-eb999ea93fce-metrics-certs") pod "network-metrics-daemon-p6m5g" (UID: "2412fa08-e643-4225-b494-eb999ea93fce") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.061798 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.061859 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.061870 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.061893 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.061901 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:27Z","lastTransitionTime":"2026-01-29T12:07:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.165584 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.165661 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.165678 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.165698 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.165708 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:27Z","lastTransitionTime":"2026-01-29T12:07:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.279109 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-04 13:47:16.868548869 +0000 UTC Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.281868 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.281925 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.281940 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.281956 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.281965 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:27Z","lastTransitionTime":"2026-01-29T12:07:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.304335 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-nzkvz_80bec2ab-0a88-4818-9339-760edda3b07e/ovnkube-controller/1.log" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.304913 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-nzkvz_80bec2ab-0a88-4818-9339-760edda3b07e/ovnkube-controller/0.log" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.308401 4753 generic.go:334] "Generic (PLEG): container finished" podID="80bec2ab-0a88-4818-9339-760edda3b07e" containerID="355e2c7c84bbb5dcfd75734a95868268a117bb2fad6db6fc7c23ab6afbf7fb58" exitCode=1 Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.308462 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" event={"ID":"80bec2ab-0a88-4818-9339-760edda3b07e","Type":"ContainerDied","Data":"355e2c7c84bbb5dcfd75734a95868268a117bb2fad6db6fc7c23ab6afbf7fb58"} Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.308520 4753 scope.go:117] "RemoveContainer" containerID="34d19c3c88e356688bc3f0dbe54c1a46ef5e5f5fedbf84c5cb536b3c272decb0" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.309794 4753 scope.go:117] "RemoveContainer" containerID="355e2c7c84bbb5dcfd75734a95868268a117bb2fad6db6fc7c23ab6afbf7fb58" Jan 29 12:07:27 crc kubenswrapper[4753]: E0129 12:07:27.310069 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-nzkvz_openshift-ovn-kubernetes(80bec2ab-0a88-4818-9339-760edda3b07e)\"" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" podUID="80bec2ab-0a88-4818-9339-760edda3b07e" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.324847 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5m2jf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0203345d-1e9f-4cfe-bde7-90f87221d1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fdf3d79084d3e17b1f8cb0b285fd425280f34724c6a441104a2ec52b5217bfdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zzwn6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5m2jf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:27Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.337384 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-p6m5g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2412fa08-e643-4225-b494-eb999ea93fce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjjxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjjxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:23Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-p6m5g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:27Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.352273 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c93f64b-e5af-4d6b-83e9-7fdfeb8548ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a9ec3e416b3b385c71b2fa28c0215fcb04b708e33fec2d85fdf0a75b848b027\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4fd3a920ea0c98edb2c8ad37d2a1a5d1ed50de5bc8c3e2f4d34c876b2cab54f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8dea62586ba5ac5a8ddc9e75b216a27593c8fe99516abc38c9c411a93842a372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://44c03bba5a02d879c3b235cd65897fb30651871689c8f02b9526966817f79636\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44c03bba5a02d879c3b235cd65897fb30651871689c8f02b9526966817f79636\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:27Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.365867 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7962d88e183df5c4d91f5767f2007068ea2fdfd5c31d9d6ed609c2ab9ca26999\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:27Z is after 2025-08-24T17:21:41Z" Jan 29 
12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.380300 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:27Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.380847 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.380969 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 12:07:27 crc kubenswrapper[4753]: E0129 12:07:27.380979 4753 secret.go:188] Couldn't get secret 
openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 29 12:07:27 crc kubenswrapper[4753]: E0129 12:07:27.381052 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 12:07:59.381031971 +0000 UTC m=+93.633113426 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 29 12:07:27 crc kubenswrapper[4753]: E0129 12:07:27.381217 4753 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 29 12:07:27 crc kubenswrapper[4753]: E0129 12:07:27.381374 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 12:07:59.38134875 +0000 UTC m=+93.633430255 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.386458 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.386498 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.386509 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.386542 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.386557 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:27Z","lastTransitionTime":"2026-01-29T12:07:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.395735 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x8kzr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d753320a-61c2-4c0e-bd48-96d74b352114\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c7a667b7243fc36ff18cb4c017152fff3d5f4eb5e28e306ace44420cd722c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tgns4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x8kzr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:27Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.416352 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vwcjk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e921d361518102483679b5ff7387e27858e315d637efb0f18e06215fe00f70a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8b1c7640bd4881ba79f3471e309b2d1538aea8d0c8e01d3d7cfc3677d95725a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8b1c7640bd4881ba79f3471e309b2d1538aea8d0c8e01d3d7cfc3677d95725a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0ba51b6e2180da460faa3599850175db9d7410880ba0214425fa275803f36d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0ba51b6e2180da460faa3599850175db9d7410880ba0214425fa275803f36d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ed771b481178d6e12c669e122932ef25e1fe09db24c8f025f50a469400af184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ed771b481178d6e12c669e122932ef25e1fe09db24c8f025f50a469400af184\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1990423fefacfb9b390af88b9af343c1bb4c02014db6bf8226918a7be9f4ad5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1990423fefacfb9b390af88b9af343c1bb4c02014db6bf8226918a7be9f4ad5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vwcjk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:27Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.431797 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"703a6b9d-a15d-4ed3-b00b-db7bd5d42c61\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cab3ab80a8fddbc591cd07fdbed800f525bb8a1b5505bf818ebb59cbcce73003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7af8be0836a8ff7d08ff48d9487acdedd4b83127538d071d13b3215400c0f8de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d57a9cede640bdcec325175598b72dd00398436b730248728c7d9afd545b1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e86597c91848f265cd70410d57ac60eadba1d0322b9b897097366a41c93d8134\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T12:06:54Z\\\",\\\"message\\\":\\\"ed a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\"\\\\nI0129 12:06:54.190978 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 12:06:54.198044 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 12:06:54.198078 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 12:06:54.198139 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 12:06:54.198147 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 12:06:54.208089 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 12:06:54.208122 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208128 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208132 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 12:06:54.208135 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 12:06:54.208139 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 12:06:54.208143 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 12:06:54.208164 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0129 12:06:54.212380 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769688397\\\\\\\\\\\\\\\" (2026-01-29 12:06:37 +0000 UTC to 2026-02-28 12:06:38 +0000 UTC (now=2026-01-29 12:06:54.212317535 +0000 UTC))\\\\\\\"\\\\nF0129 12:06:54.212498 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e02d977e34888952623f4ced5e073d7bf1ff360f8deb435dc577d2ecf146cb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:33Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:27Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.444980 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:27Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.460201 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:27Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.476508 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966eb8e59f82bacf20ebfa1b1d5bf9dc0a9cdb3b430ae83ed9bde106eb3fa521\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea7c92ec02a9cf6ec5fbd8efe22a066dfef9b48f042137964c5dd49578c99079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:27Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.489705 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.489754 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.489764 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.489781 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.489791 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:27Z","lastTransitionTime":"2026-01-29T12:07:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.490628 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0310995-a7c7-47c3-ae6c-05daaaba92a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e56ac830346c0f85227bcebc9b76c2af8646767d4678934985d7b1f5846dcb82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8391a68625fc3d8fdbc4f16b0b7b25e359798d08ce65ad8f482722910873418a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7c24x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:27Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.506365 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rnbz9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b372210b-6e1b-4a80-b379-7c1d570712f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://346c36936603f4535d5ad3e6b60e451b05b4d376e2531c813f0c900c92caf5fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqzp6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\
\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rnbz9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:27Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.521852 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9dtck" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"132de771-c8b4-403f-8916-8b453e7c6fc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30aac9e31bfe76eba96a50a30b03419b0b23ddd888485d609f37b84af28b8d3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gvjpd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://829af7b1f520503a0f36c60dd12b25d45c3831ba0cbb61f4f0a80582cb46da1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"
},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gvjpd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-9dtck\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:27Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.536392 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b108c546-a8c1-4bf9-814b-bfb5871b3013\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9842d9d6fed8c87bc20835964d3271b0be3117fedc658e6ef028db422d1ad478\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4fd4169036d3a646c9e5e4bfa8d86a92c557c3d8f77608ae5308dd0b8c45517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\"
:\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b4a6bbb431e321e54013c96e3cca1537283cbdf65d89a978780550d603663fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://31bd7db7bddd3e16cb455f7af9fd3a9bca49919c8f1ffb676dec83c543ba7763\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:27Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.551094 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d7ccf10988fd83d31d42be6f82403067a0cc50313c4d5dcddfb528507efffa4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:27Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.574640 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80bec2ab-0a88-4818-9339-760edda3b07e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f15016e29f1fc14f51b4d01e772071333fd8a885cacc1804b3a03dbcfc93836\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e6a7d8df812546f17b3e026005c5062086e97e3fa364b3fa22cefec025e4eba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f71859413676dab4a435d43c11ad7cba315a12de744c3c8b161273f6ad36580c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0ec2a21ec299b5eed363668f39a270a8b03ea79b9de3dd23bda0c30579c3de3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91c1df8b6763320498420463a3841f0bfa4fcf753c355f549c3c1e93bf5206a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://05d1e99596530f467ecdae51aec8db191ac045cd85e121fc9835e2f3688f6ae7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://355e2c7c84bbb5dcfd75734a95868268a117bb2f
ad6db6fc7c23ab6afbf7fb58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34d19c3c88e356688bc3f0dbe54c1a46ef5e5f5fedbf84c5cb536b3c272decb0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T12:07:25Z\\\",\\\"message\\\":\\\"sip/v1/apis/informers/externalversions/factory.go:140\\\\nI0129 12:07:25.237371 5906 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0129 12:07:25.237422 5906 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0129 12:07:25.237429 5906 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0129 12:07:25.237510 5906 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0129 12:07:25.237512 5906 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0129 12:07:25.237516 5906 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0129 12:07:25.237526 5906 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0129 12:07:25.237544 5906 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0129 12:07:25.237581 5906 handler.go:208] Removed *v1.Node event handler 2\\\\nI0129 12:07:25.237660 5906 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0129 12:07:25.237696 5906 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0129 12:07:25.237656 5906 handler.go:208] Removed *v1.Node event handler 7\\\\nI0129 12:07:25.237757 5906 factory.go:656] Stopping watch factory\\\\nI0129 12:07:25.237782 5906 ovnkube.go:599] Stopped ovnkube\\\\nI0129 12:07:25.237721 5906 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0129 12:07:2\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:14Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://355e2c7c84bbb5dcfd75734a95868268a117bb2fad6db6fc7c23ab6afbf7fb58\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T12:07:26Z\\\",\\\"message\\\":\\\"ift-kube-scheduler/scheduler_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.169:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {39432221-5995-412b-967b-35e1a9405ec7}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0129 12:07:26.279603 6197 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": 
tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:26Z is after 2025-08-24T17:21:41Z]\\\\nI0129 12:07:26.279616 6197 obj_retry.go:303] Retry object setup: *v1.Pod openshift-i\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e40e5b13dfdc135ea09253d466bacdfc5c2ac3d28eabc76d833d71ac300905c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.
126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-nzkvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:27Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.593392 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.593447 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.593457 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.593475 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.593485 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:27Z","lastTransitionTime":"2026-01-29T12:07:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.695896 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.695972 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.695985 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.696005 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.696020 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:27Z","lastTransitionTime":"2026-01-29T12:07:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.798951 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.799008 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.799018 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.799034 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.799044 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:27Z","lastTransitionTime":"2026-01-29T12:07:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.901917 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.901954 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.901963 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.901979 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.901989 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:27Z","lastTransitionTime":"2026-01-29T12:07:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.910215 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80bec2ab-0a88-4818-9339-760edda3b07e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f15016e29f1fc14f51b4d01e772071333fd8a885cacc1804b3a03dbcfc93836\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e6a7d8df812546f17b3e026005c5062086e97e3fa364b3fa22cefec025e4eba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://f71859413676dab4a435d43c11ad7cba315a12de744c3c8b161273f6ad36580c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0ec2a21ec299b5eed363668f39a270a8b03ea79b9de3dd23bda0c30579c3de3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91c1df8b6763320498420463a3841f0bfa4fcf753c355f549c3c1e93bf5206a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://05d1e99596530f467ecdae51aec8db191ac045cd85e121fc9835e2f3688f6ae7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://355e2c7c84bbb5dcfd75734a95868268a117bb2fad6db6fc7c23ab6afbf7fb58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34d19c3c88e356688bc3f0dbe54c1a46ef5e5f5fedbf84c5cb536b3c272decb0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T12:07:25Z\\\",\\\"message\\\":\\\"sip/v1/apis/informers/externalversions/factory.go:140\\\\nI0129 12:07:25.237371 5906 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0129 12:07:25.237422 5906 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0129 12:07:25.237429 5906 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0129 12:07:25.237510 5906 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0129 12:07:25.237512 5906 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0129 12:07:25.237516 5906 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0129 12:07:25.237526 5906 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0129 12:07:25.237544 5906 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0129 12:07:25.237581 5906 handler.go:208] Removed *v1.Node event handler 2\\\\nI0129 12:07:25.237660 5906 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0129 12:07:25.237696 5906 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0129 12:07:25.237656 5906 handler.go:208] Removed *v1.Node event handler 7\\\\nI0129 12:07:25.237757 5906 factory.go:656] Stopping watch factory\\\\nI0129 12:07:25.237782 5906 ovnkube.go:599] Stopped ovnkube\\\\nI0129 12:07:25.237721 5906 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0129 
12:07:2\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:14Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://355e2c7c84bbb5dcfd75734a95868268a117bb2fad6db6fc7c23ab6afbf7fb58\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T12:07:26Z\\\",\\\"message\\\":\\\"ift-kube-scheduler/scheduler_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.169:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {39432221-5995-412b-967b-35e1a9405ec7}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0129 12:07:26.279603 6197 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:26Z is after 2025-08-24T17:21:41Z]\\\\nI0129 12:07:26.279616 6197 obj_retry.go:303] Retry object setup: *v1.Pod 
openshift-i\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e40e5b13dfdc135ea09253d466bacdfc5c2ac3d28eabc76d833d71ac300905c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd4
7ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-nzkvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:27Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.925309 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d7ccf10988fd83d31d42be6f82403067a0cc50313c4d5dcddfb528507efffa4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:27Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.940811 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7962d88e183df5c4d91f5767f2007068ea2fdfd5c31d9d6ed609c2ab9ca26999\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:27Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.955956 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5m2jf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0203345d-1e9f-4cfe-bde7-90f87221d1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fdf3d79084d3e17b1f8cb0b285fd425280f34724c6a441104a2ec52b5217bfdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zzwn6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5m2jf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:27Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.969336 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-p6m5g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2412fa08-e643-4225-b494-eb999ea93fce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjjxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjjxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:23Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-p6m5g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:27Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.983723 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c93f64b-e5af-4d6b-83e9-7fdfeb8548ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a9ec3e416b3b385c71b2fa28c0215fcb04b708e33fec2d85fdf0a75b848b027\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4fd3a920ea0c98edb2c8ad37d2a1a5d1ed50de5bc8c3e2f4d34c876b2cab54f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8dea62586ba5ac5a8ddc9e75b216a27593c8fe99516abc38c9c411a93842a372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://44c03bba5a02d879c3b235cd65897fb30651871689c8f02b9526966817f79636\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44c03bba5a02d879c3b235cd65897fb30651871689c8f02b9526966817f79636\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:27Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:27 crc kubenswrapper[4753]: I0129 12:07:27.999656 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"703a6b9d-a15d-4ed3-b00b-db7bd5d42c61\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cab3ab80a8fddbc591cd07fdbed800f525bb8a1b5505bf818ebb59cbcce73003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7af8be0836a8ff7d08ff48d9487acdedd4b83127538d071d13b3215400c0f8de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d57a9cede640bdcec325175598b72dd00398436b730248728c7d9afd545b1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e86597c91848f265cd70410d57ac60eadba1d0322b9b897097366a41c93d8134\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T12:06:54Z\\\",\\\"message\\\":\\\"ed a new cert/key pair\\\\\\\" 
name=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\"\\\\nI0129 12:06:54.190978 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 12:06:54.198044 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 12:06:54.198078 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 12:06:54.198139 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 12:06:54.198147 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 12:06:54.208089 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 12:06:54.208122 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208128 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208132 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 12:06:54.208135 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 12:06:54.208139 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 12:06:54.208143 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 12:06:54.208164 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0129 12:06:54.212380 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769688397\\\\\\\\\\\\\\\" (2026-01-29 12:06:37 +0000 UTC to 2026-02-28 12:06:38 +0000 UTC (now=2026-01-29 12:06:54.212317535 +0000 UTC))\\\\\\\"\\\\nF0129 12:06:54.212498 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e02d977e34888952623f4ced5e073d7bf1ff360f8deb435dc577d2ecf146cb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:33Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:27Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.004839 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.004934 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.004945 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.004973 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.004993 4753 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:28Z","lastTransitionTime":"2026-01-29T12:07:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.014118 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:28Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.029012 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x8kzr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d753320a-61c2-4c0e-bd48-96d74b352114\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c7a667b7243fc36ff18cb4c017152fff3d5f4eb5e28e306ace44420cd722c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tgns4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x8kzr\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:28Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.050111 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vwcjk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e921d361518102483679b5ff7387e27858e315d637efb0f18e06215fe00f70a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8b1c7640bd4881ba79f3471e309b2d1538aea8d0c8e01d3d7cfc3677d95725a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8b1c7640bd4881ba79f3471e309b2d1538aea8d0c8e01d3d7cfc3677d95725a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0ba51b6e2180da460faa3599850175db9d7410880ba0214425fa275803f36d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0ba51b6e2180da460faa3599850175db9d7410880ba0214425fa275803f36d3\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2026-01-29T12:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ed771b481178d6e12c669e122932ef25e1fe09db24c8f025f50a469400af184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ed771b481178d6e12c669e122932ef25e1fe09db24c8f025f50a469400af184\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1990423fefacfb9b390af88b9af343c1bb4c02014db6bf8226918a7be9f4ad5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1990423fefacfb9b390af88b9af343c1bb4c02014db6bf8226918a7be9f4ad5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vwcjk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-01-29T12:07:28Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.065639 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b108c546-a8c1-4bf9-814b-bfb5871b3013\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9842d9d6fed8c87bc20835964d3271b0be3117fedc658e6ef028db422d1ad478\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4fd4169036d3a646c9e5e4bfa8d86a92c557c3d8f77608ae5308dd0b8c45517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b4a6bbb431e321e54013c96e3cca1537283cbdf65d89a978780550d603663fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resour
ces\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://31bd7db7bddd3e16cb455f7af9fd3a9bca49919c8f1ffb676dec83c543ba7763\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:28Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.079868 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:28Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.094406 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:28Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.108554 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.108622 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.108638 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.108661 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.108680 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:28Z","lastTransitionTime":"2026-01-29T12:07:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.109442 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966eb8e59f82bacf20ebfa1b1d5bf9dc0a9cdb3b430ae83ed9bde106eb3fa521\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea7c92ec02a9cf6ec5fbd8efe22a066dfef9b48f042137964c5dd49578c99079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:28Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.126776 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0310995-a7c7-47c3-ae6c-05daaaba92a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e56ac830346c0f85227bcebc9b76c2af8646767d4678934985d7b1f5846dcb82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8391a68625fc3d8fdbc4f16b0b7b25e359798d08ce65ad8f482722910873418a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7c24x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:28Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.227409 4753 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.227451 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.227465 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.227492 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.227505 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:28Z","lastTransitionTime":"2026-01-29T12:07:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.245395 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rnbz9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b372210b-6e1b-4a80-b379-7c1d570712f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://346c36936603f4535d5ad3e6b60e451b05b4d376e2531c813f0c900c92caf5fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin
\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqzp6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rnbz9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:28Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.270626 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9dtck" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"132de771-c8b4-403f-8916-8b453e7c6fc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30aac9e31bfe76eba96a50a30b03419b0b23ddd888485d609f37b84af28b8d3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/servic
eaccount\\\",\\\"name\\\":\\\"kube-api-access-gvjpd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://829af7b1f520503a0f36c60dd12b25d45c3831ba0cbb61f4f0a80582cb46da1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gvjpd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-9dtck\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:28Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.281320 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-10 14:20:12.306922649 +0000 UTC Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.316256 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-nzkvz_80bec2ab-0a88-4818-9339-760edda3b07e/ovnkube-controller/1.log" Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.321985 4753 scope.go:117] "RemoveContainer" containerID="355e2c7c84bbb5dcfd75734a95868268a117bb2fad6db6fc7c23ab6afbf7fb58" Jan 29 12:07:28 crc kubenswrapper[4753]: E0129 12:07:28.322173 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-nzkvz_openshift-ovn-kubernetes(80bec2ab-0a88-4818-9339-760edda3b07e)\"" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" podUID="80bec2ab-0a88-4818-9339-760edda3b07e" Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.329791 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.329848 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.329859 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.329882 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:28 crc 
kubenswrapper[4753]: I0129 12:07:28.329897 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:28Z","lastTransitionTime":"2026-01-29T12:07:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.337957 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d7ccf10988fd83d31d42be6f82403067a0cc50313c4d5dcddfb528507efffa4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:28Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.359001 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"80bec2ab-0a88-4818-9339-760edda3b07e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f15016e29f1fc14f51b4d01e772071333fd8a885cacc1804b3a03dbcfc93836\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e6a7d8df812546f17b3e026005c5062086e97e3fa364b3fa22cefec025e4eba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f71859413676dab4a435d43c11ad7cba315a12de744c3c8b161273f6ad36580c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0ec2a21ec299b5eed363668f39a270a8b03ea79b9de3dd23bda0c30579c3de3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91c1df8b6763320498420463a3841f0bfa4fcf753c355f549c3c1e93bf5206a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://05d1e99596530f467ecdae51aec8db191ac045cd85e121fc9835e2f3688f6ae7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://355e2c7c84bbb5dcfd75734a95868268a117bb2fad6db6fc7c23ab6afbf7fb58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://355e2c7c84bbb5dcfd75734a95868268a117bb2fad6db6fc7c23ab6afbf7fb58\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T12:07:26Z\\\",\\\"message\\\":\\\"ift-kube-scheduler/scheduler_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.169:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {39432221-5995-412b-967b-35e1a9405ec7}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0129 12:07:26.279603 6197 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:26Z is after 2025-08-24T17:21:41Z]\\\\nI0129 12:07:26.279616 6197 obj_retry.go:303] Retry object setup: *v1.Pod openshift-i\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:25Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-nzkvz_openshift-ovn-kubernetes(80bec2ab-0a88-4818-9339-760edda3b07e)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e40e5b13dfdc135ea09253d466bacdfc5c2ac3d28eabc76d833d71ac300905c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-nzkvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:28Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.372299 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5m2jf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0203345d-1e9f-4cfe-bde7-90f87221d1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fdf3d79084d3e17b1f8cb0b285fd425280f34724c6a441104a2ec52b5217bfdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zzwn6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":
[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5m2jf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:28Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.385792 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-p6m5g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2412fa08-e643-4225-b494-eb999ea93fce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjjxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjjxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:23Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-p6m5g\": Internal error occurred: 
failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:28Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.400050 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c93f64b-e5af-4d6b-83e9-7fdfeb8548ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a9ec3e416b3b385c71b2fa28c0215fcb04b708e33fec2d85fdf0a75b848b027\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4fd3a920ea0c98edb2c8ad37d2a1a5d1ed50de5bc8c3e2f4d34c876b2cab54f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8dea62586ba5ac5a8ddc9e75b216a27593c8fe99516abc38c9c411a93842a372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"runnin
g\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://44c03bba5a02d879c3b235cd65897fb30651871689c8f02b9526966817f79636\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44c03bba5a02d879c3b235cd65897fb30651871689c8f02b9526966817f79636\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:28Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.414051 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7962d88e183df5c4d91f5767f2007068ea2fdfd5c31d9d6ed609c2ab9ca26999\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:28Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.430769 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:28Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.432914 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.432951 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.432963 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.432981 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.432991 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:28Z","lastTransitionTime":"2026-01-29T12:07:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.447429 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x8kzr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d753320a-61c2-4c0e-bd48-96d74b352114\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c7a667b7243fc36ff18cb4c017152fff3d5f4eb5e28e306ace44420cd722c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tgns4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x8kzr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:28Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.467056 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vwcjk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e921d361518102483679b5ff7387e27858e315d637efb0f18e06215fe00f70a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8b1c7640bd4881ba79f3471e309b2d1538aea8d0c8e01d3d7cfc3677d95725a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8b1c7640bd4881ba79f3471e309b2d1538aea8d0c8e01d3d7cfc3677d95725a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0ba51b6e2180da460faa3599850175db9d7410880ba0214425fa275803f36d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0ba51b6e2180da460faa3599850175db9d7410880ba0214425fa275803f36d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ed771b481178d6e12c669e122932ef25e1fe09db24c8f025f50a469400af184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ed771b481178d6e12c669e122932ef25e1fe09db24c8f025f50a469400af184\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1990423fefacfb9b390af88b9af343c1bb4c02014db6bf8226918a7be9f4ad5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1990423fefacfb9b390af88b9af343c1bb4c02014db6bf8226918a7be9f4ad5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vwcjk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:28Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.480263 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"703a6b9d-a15d-4ed3-b00b-db7bd5d42c61\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cab3ab80a8fddbc591cd07fdbed800f525bb8a1b5505bf818ebb59cbcce73003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7af8be0836a8ff7d08ff48d9487acdedd4b83127538d071d13b3215400c0f8de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d57a9cede640bdcec325175598b72dd00398436b730248728c7d9afd545b1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e86597c91848f265cd70410d57ac60eadba1d0322b9b897097366a41c93d8134\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T12:06:54Z\\\",\\\"message\\\":\\\"ed a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\"\\\\nI0129 12:06:54.190978 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 12:06:54.198044 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 12:06:54.198078 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 12:06:54.198139 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 12:06:54.198147 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 12:06:54.208089 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 12:06:54.208122 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208128 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208132 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 12:06:54.208135 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 12:06:54.208139 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 12:06:54.208143 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 12:06:54.208164 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0129 12:06:54.212380 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769688397\\\\\\\\\\\\\\\" (2026-01-29 12:06:37 +0000 UTC to 2026-02-28 12:06:38 +0000 UTC (now=2026-01-29 12:06:54.212317535 +0000 UTC))\\\\\\\"\\\\nF0129 12:06:54.212498 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e02d977e34888952623f4ced5e073d7bf1ff360f8deb435dc577d2ecf146cb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:33Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:28Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.504138 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:28Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.517810 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:28Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.530773 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966eb8e59f82bacf20ebfa1b1d5bf9dc0a9cdb3b430ae83ed9bde106eb3fa521\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea7c92ec02a9cf6ec5fbd8efe22a066dfef9b48f042137964c5dd49578c99079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:28Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.535553 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.535759 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.535859 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.535974 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.536060 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:28Z","lastTransitionTime":"2026-01-29T12:07:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.542970 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0310995-a7c7-47c3-ae6c-05daaaba92a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e56ac830346c0f85227bcebc9b76c2af8646767d4678934985d7b1f5846dcb82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8391a68625fc3d8fdbc4f16b0b7b25e359798d08ce65ad8f482722910873418a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7c24x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:28Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.557420 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rnbz9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b372210b-6e1b-4a80-b379-7c1d570712f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://346c36936603f4535d5ad3e6b60e451b05b4d376e2531c813f0c900c92caf5fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqzp6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\
\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rnbz9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:28Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.570038 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9dtck" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"132de771-c8b4-403f-8916-8b453e7c6fc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30aac9e31bfe76eba96a50a30b03419b0b23ddd888485d609f37b84af28b8d3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gvjpd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://829af7b1f520503a0f36c60dd12b25d45c3831ba0cbb61f4f0a80582cb46da1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"
},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gvjpd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-9dtck\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:28Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.584202 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b108c546-a8c1-4bf9-814b-bfb5871b3013\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9842d9d6fed8c87bc20835964d3271b0be3117fedc658e6ef028db422d1ad478\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4fd4169036d3a646c9e5e4bfa8d86a92c557c3d8f77608ae5308dd0b8c45517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\"
:\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b4a6bbb431e321e54013c96e3cca1537283cbdf65d89a978780550d603663fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://31bd7db7bddd3e16cb455f7af9fd3a9bca49919c8f1ffb676dec83c543ba7763\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:28Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.627754 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.627822 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 12:07:28 crc kubenswrapper[4753]: E0129 12:07:28.627997 4753 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 29 12:07:28 crc kubenswrapper[4753]: 
E0129 12:07:28.628016 4753 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 29 12:07:28 crc kubenswrapper[4753]: E0129 12:07:28.628033 4753 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 12:07:28 crc kubenswrapper[4753]: E0129 12:07:28.628085 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-29 12:08:00.62806897 +0000 UTC m=+94.880150425 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 12:07:28 crc kubenswrapper[4753]: E0129 12:07:28.628150 4753 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 29 12:07:28 crc kubenswrapper[4753]: E0129 12:07:28.628216 4753 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 29 12:07:28 crc kubenswrapper[4753]: E0129 12:07:28.628284 4753 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 12:07:28 crc kubenswrapper[4753]: E0129 12:07:28.628426 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-29 12:08:00.62840035 +0000 UTC m=+94.880481795 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.638717 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.638773 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.638785 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.638807 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.638819 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:28Z","lastTransitionTime":"2026-01-29T12:07:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.741449 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.741510 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.741522 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.741543 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.741553 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:28Z","lastTransitionTime":"2026-01-29T12:07:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.845105 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.845171 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.845184 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.845243 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.845258 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:28Z","lastTransitionTime":"2026-01-29T12:07:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.887829 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p6m5g"
Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.887891 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.888005 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 29 12:07:28 crc kubenswrapper[4753]: E0129 12:07:28.888170 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p6m5g" podUID="2412fa08-e643-4225-b494-eb999ea93fce"
Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.888209 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 29 12:07:28 crc kubenswrapper[4753]: E0129 12:07:28.888391 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 29 12:07:28 crc kubenswrapper[4753]: E0129 12:07:28.888536 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 29 12:07:28 crc kubenswrapper[4753]: E0129 12:07:28.888702 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.970763 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.970839 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.970852 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.970872 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:28 crc kubenswrapper[4753]: I0129 12:07:28.970886 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:28Z","lastTransitionTime":"2026-01-29T12:07:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:29 crc kubenswrapper[4753]: I0129 12:07:29.072564 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:29 crc kubenswrapper[4753]: I0129 12:07:29.072645 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:29 crc kubenswrapper[4753]: I0129 12:07:29.072663 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:29 crc kubenswrapper[4753]: I0129 12:07:29.072691 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:29 crc kubenswrapper[4753]: I0129 12:07:29.072711 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:29Z","lastTransitionTime":"2026-01-29T12:07:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:29 crc kubenswrapper[4753]: I0129 12:07:29.174775 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:29 crc kubenswrapper[4753]: I0129 12:07:29.174816 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:29 crc kubenswrapper[4753]: I0129 12:07:29.174828 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:29 crc kubenswrapper[4753]: I0129 12:07:29.174845 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:29 crc kubenswrapper[4753]: I0129 12:07:29.174856 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:29Z","lastTransitionTime":"2026-01-29T12:07:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:29 crc kubenswrapper[4753]: I0129 12:07:29.277988 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:29 crc kubenswrapper[4753]: I0129 12:07:29.278039 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:29 crc kubenswrapper[4753]: I0129 12:07:29.278049 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:29 crc kubenswrapper[4753]: I0129 12:07:29.278071 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:29 crc kubenswrapper[4753]: I0129 12:07:29.278081 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:29Z","lastTransitionTime":"2026-01-29T12:07:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:29 crc kubenswrapper[4753]: I0129 12:07:29.282247 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-29 09:07:48.490125862 +0000 UTC
Jan 29 12:07:29 crc kubenswrapper[4753]: I0129 12:07:29.380315 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:29 crc kubenswrapper[4753]: I0129 12:07:29.380373 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:29 crc kubenswrapper[4753]: I0129 12:07:29.380388 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:29 crc kubenswrapper[4753]: I0129 12:07:29.380411 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:29 crc kubenswrapper[4753]: I0129 12:07:29.380422 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:29Z","lastTransitionTime":"2026-01-29T12:07:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:29 crc kubenswrapper[4753]: I0129 12:07:29.489484 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:29 crc kubenswrapper[4753]: I0129 12:07:29.489561 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:29 crc kubenswrapper[4753]: I0129 12:07:29.489580 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:29 crc kubenswrapper[4753]: I0129 12:07:29.489642 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:29 crc kubenswrapper[4753]: I0129 12:07:29.489671 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:29Z","lastTransitionTime":"2026-01-29T12:07:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:29 crc kubenswrapper[4753]: I0129 12:07:29.594194 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:29 crc kubenswrapper[4753]: I0129 12:07:29.594276 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:29 crc kubenswrapper[4753]: I0129 12:07:29.594285 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:29 crc kubenswrapper[4753]: I0129 12:07:29.594304 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:29 crc kubenswrapper[4753]: I0129 12:07:29.594316 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:29Z","lastTransitionTime":"2026-01-29T12:07:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:29 crc kubenswrapper[4753]: I0129 12:07:29.698337 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:29 crc kubenswrapper[4753]: I0129 12:07:29.698385 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:29 crc kubenswrapper[4753]: I0129 12:07:29.698394 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:29 crc kubenswrapper[4753]: I0129 12:07:29.698409 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:29 crc kubenswrapper[4753]: I0129 12:07:29.698420 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:29Z","lastTransitionTime":"2026-01-29T12:07:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:29 crc kubenswrapper[4753]: I0129 12:07:29.801367 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:29 crc kubenswrapper[4753]: I0129 12:07:29.801419 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:29 crc kubenswrapper[4753]: I0129 12:07:29.801432 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:29 crc kubenswrapper[4753]: I0129 12:07:29.801454 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:29 crc kubenswrapper[4753]: I0129 12:07:29.801476 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:29Z","lastTransitionTime":"2026-01-29T12:07:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:29 crc kubenswrapper[4753]: I0129 12:07:29.905003 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:29 crc kubenswrapper[4753]: I0129 12:07:29.905049 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:29 crc kubenswrapper[4753]: I0129 12:07:29.905064 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:29 crc kubenswrapper[4753]: I0129 12:07:29.905081 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:29 crc kubenswrapper[4753]: I0129 12:07:29.905094 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:29Z","lastTransitionTime":"2026-01-29T12:07:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:30 crc kubenswrapper[4753]: I0129 12:07:30.009514 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:30 crc kubenswrapper[4753]: I0129 12:07:30.009583 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:30 crc kubenswrapper[4753]: I0129 12:07:30.009603 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:30 crc kubenswrapper[4753]: I0129 12:07:30.009632 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:30 crc kubenswrapper[4753]: I0129 12:07:30.009698 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:30Z","lastTransitionTime":"2026-01-29T12:07:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:30 crc kubenswrapper[4753]: I0129 12:07:30.113761 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:30 crc kubenswrapper[4753]: I0129 12:07:30.113827 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:30 crc kubenswrapper[4753]: I0129 12:07:30.113838 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:30 crc kubenswrapper[4753]: I0129 12:07:30.113860 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:30 crc kubenswrapper[4753]: I0129 12:07:30.113873 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:30Z","lastTransitionTime":"2026-01-29T12:07:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:30 crc kubenswrapper[4753]: I0129 12:07:30.217246 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:30 crc kubenswrapper[4753]: I0129 12:07:30.217353 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:30 crc kubenswrapper[4753]: I0129 12:07:30.217368 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:30 crc kubenswrapper[4753]: I0129 12:07:30.217394 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:30 crc kubenswrapper[4753]: I0129 12:07:30.217421 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:30Z","lastTransitionTime":"2026-01-29T12:07:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:30 crc kubenswrapper[4753]: I0129 12:07:30.283119 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-13 22:48:17.404202029 +0000 UTC
Jan 29 12:07:30 crc kubenswrapper[4753]: I0129 12:07:30.321677 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:30 crc kubenswrapper[4753]: I0129 12:07:30.321736 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:30 crc kubenswrapper[4753]: I0129 12:07:30.321749 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:30 crc kubenswrapper[4753]: I0129 12:07:30.321772 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:30 crc kubenswrapper[4753]: I0129 12:07:30.321785 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:30Z","lastTransitionTime":"2026-01-29T12:07:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:30 crc kubenswrapper[4753]: I0129 12:07:30.425009 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:30 crc kubenswrapper[4753]: I0129 12:07:30.425121 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:30 crc kubenswrapper[4753]: I0129 12:07:30.425133 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:30 crc kubenswrapper[4753]: I0129 12:07:30.425156 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:30 crc kubenswrapper[4753]: I0129 12:07:30.425169 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:30Z","lastTransitionTime":"2026-01-29T12:07:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:30 crc kubenswrapper[4753]: I0129 12:07:30.529634 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:30 crc kubenswrapper[4753]: I0129 12:07:30.530143 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:30 crc kubenswrapper[4753]: I0129 12:07:30.530309 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:30 crc kubenswrapper[4753]: I0129 12:07:30.530447 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:30 crc kubenswrapper[4753]: I0129 12:07:30.530555 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:30Z","lastTransitionTime":"2026-01-29T12:07:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:30 crc kubenswrapper[4753]: I0129 12:07:30.634989 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:30 crc kubenswrapper[4753]: I0129 12:07:30.635063 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:30 crc kubenswrapper[4753]: I0129 12:07:30.635080 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:30 crc kubenswrapper[4753]: I0129 12:07:30.635102 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:30 crc kubenswrapper[4753]: I0129 12:07:30.635119 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:30Z","lastTransitionTime":"2026-01-29T12:07:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:30 crc kubenswrapper[4753]: I0129 12:07:30.738562 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:30 crc kubenswrapper[4753]: I0129 12:07:30.738644 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:30 crc kubenswrapper[4753]: I0129 12:07:30.738658 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:30 crc kubenswrapper[4753]: I0129 12:07:30.738689 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:30 crc kubenswrapper[4753]: I0129 12:07:30.738703 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:30Z","lastTransitionTime":"2026-01-29T12:07:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:30 crc kubenswrapper[4753]: I0129 12:07:30.845306 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:30 crc kubenswrapper[4753]: I0129 12:07:30.845392 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:30 crc kubenswrapper[4753]: I0129 12:07:30.845405 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:30 crc kubenswrapper[4753]: I0129 12:07:30.845428 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:30 crc kubenswrapper[4753]: I0129 12:07:30.845444 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:30Z","lastTransitionTime":"2026-01-29T12:07:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:30 crc kubenswrapper[4753]: I0129 12:07:30.888562 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 29 12:07:30 crc kubenswrapper[4753]: I0129 12:07:30.888712 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p6m5g"
Jan 29 12:07:30 crc kubenswrapper[4753]: I0129 12:07:30.888558 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 29 12:07:30 crc kubenswrapper[4753]: I0129 12:07:30.888565 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 29 12:07:30 crc kubenswrapper[4753]: E0129 12:07:30.889512 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 29 12:07:30 crc kubenswrapper[4753]: E0129 12:07:30.889677 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 29 12:07:30 crc kubenswrapper[4753]: E0129 12:07:30.889908 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p6m5g" podUID="2412fa08-e643-4225-b494-eb999ea93fce"
Jan 29 12:07:30 crc kubenswrapper[4753]: E0129 12:07:30.890047 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 29 12:07:30 crc kubenswrapper[4753]: I0129 12:07:30.950320 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:30 crc kubenswrapper[4753]: I0129 12:07:30.950385 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:30 crc kubenswrapper[4753]: I0129 12:07:30.950401 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:30 crc kubenswrapper[4753]: I0129 12:07:30.950424 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:30 crc kubenswrapper[4753]: I0129 12:07:30.950441 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:30Z","lastTransitionTime":"2026-01-29T12:07:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:30 crc kubenswrapper[4753]: I0129 12:07:30.965884 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 29 12:07:30 crc kubenswrapper[4753]: I0129 12:07:30.985654 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b108c546-a8c1-4bf9-814b-bfb5871b3013\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9842d9d6fed8c87bc20835964d3271b0be3117fedc658e6ef028db422d1ad478\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4fd4169036d3a646c9e5e4bfa8d86a92c557c3d8f77608ae5308dd0b8c45517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b4a6bbb431e321e54013c96e3cca1537283cbdf65d89a978780550d603663fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://31bd7db7bddd3e16cb455f7af9fd3a9bca49919c8f1ffb676dec83c543ba7763\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:30Z is after 2025-08-24T17:21:41Z"
Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.003109 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:30Z is after 2025-08-24T17:21:41Z"
Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.020038 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:31Z is after 2025-08-24T17:21:41Z"
Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.034917 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966eb8e59f82bacf20ebfa1b1d5bf9dc0a9cdb3b430ae83ed9bde106eb3fa521\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea7c92ec02a9cf6ec5fbd8efe22a066dfef9b48f042137964c5dd49578c99079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:31Z is after 2025-08-24T17:21:41Z"
Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.048709 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0310995-a7c7-47c3-ae6c-05daaaba92a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e56ac830346c0f85227bcebc9b76c2af8646767d4678934985d7b1f5846dcb82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8391a68625fc3d8fdbc4f16b0b7b25e359798d08ce65ad8f482722910873418a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7c24x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:31Z is after 2025-08-24T17:21:41Z"
Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.054358 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.054411 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.054423 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.054441 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.054454 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:31Z","lastTransitionTime":"2026-01-29T12:07:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.064302 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rnbz9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b372210b-6e1b-4a80-b379-7c1d570712f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://346c36936603f4535d5ad3e6b60e451b05b4d376e2531c813f0c900c92caf5fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqzp6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rnbz9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:31Z is after 2025-08-24T17:21:41Z"
Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.077327 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9dtck" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"132de771-c8b4-403f-8916-8b453e7c6fc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30aac9e31bfe76eba96a50a30b03419b0b23ddd888485d609f37b84af28b8d3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gvjpd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://829af7b1f520503a0f36c60dd12b25d45c3831ba0cbb61f4f0a80582cb46da1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gvjpd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-9dtck\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:31Z is after 2025-08-24T17:21:41Z"
Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.094544 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2412fa08-e643-4225-b494-eb999ea93fce-metrics-certs\") pod \"network-metrics-daemon-p6m5g\" (UID: \"2412fa08-e643-4225-b494-eb999ea93fce\") " pod="openshift-multus/network-metrics-daemon-p6m5g"
Jan 29 12:07:31 crc kubenswrapper[4753]: E0129 12:07:31.094941 4753 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 29 12:07:31 crc kubenswrapper[4753]: E0129 12:07:31.095120 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2412fa08-e643-4225-b494-eb999ea93fce-metrics-certs podName:2412fa08-e643-4225-b494-eb999ea93fce nodeName:}" failed. No retries permitted until 2026-01-29 12:07:39.095080464 +0000 UTC m=+73.347161919 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/2412fa08-e643-4225-b494-eb999ea93fce-metrics-certs") pod "network-metrics-daemon-p6m5g" (UID: "2412fa08-e643-4225-b494-eb999ea93fce") : object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.096018 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d7ccf10988fd83d31d42be6f82403067a0cc50313c4d5dcddfb528507efffa4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:31Z is after 2025-08-24T17:21:41Z"
Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.120101 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80bec2ab-0a88-4818-9339-760edda3b07e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f15016e29f1fc14f51b4d01e772071333fd8a885cacc1804b3a03dbcfc93836\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e6a7d8df812546f17b3e026005c5062086e97e3fa364b3fa22cefec025e4eba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f71859413676dab4a435d43c11ad7cba315a12de744c3c8b161273f6ad36580c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0ec2a21ec299b5eed363668f39a270a8b03ea79b9de3dd23bda0c30579c3de3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91c1df8b6763320498420463a3841f0bfa4fcf753c355f549c3c1e93bf5206a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://05d1e99596530f467ecdae51aec8db191ac045cd85e121fc9835e2f3688f6ae7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://355e2c7c84bbb5dcfd75734a95868268a117bb2f
ad6db6fc7c23ab6afbf7fb58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://355e2c7c84bbb5dcfd75734a95868268a117bb2fad6db6fc7c23ab6afbf7fb58\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T12:07:26Z\\\",\\\"message\\\":\\\"ift-kube-scheduler/scheduler_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.169:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {39432221-5995-412b-967b-35e1a9405ec7}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0129 12:07:26.279603 6197 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:26Z is after 2025-08-24T17:21:41Z]\\\\nI0129 12:07:26.279616 6197 obj_retry.go:303] Retry object setup: *v1.Pod openshift-i\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:25Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-nzkvz_openshift-ovn-kubernetes(80bec2ab-0a88-4818-9339-760edda3b07e)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e40e5b13dfdc135ea09253d466bacdfc5c2ac3d28eabc76d833d71ac300905c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-nzkvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:31Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.135635 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c93f64b-e5af-4d6b-83e9-7fdfeb8548ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a9ec3e416b3b385c71b2fa28c0215fcb04b708e33fec2d85fdf0a75b848b027\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4fd3a920ea0c98edb2c8ad37d2a1a5d1ed50de5bc8c3e2f4d34c876b2cab54f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c
97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8dea62586ba5ac5a8ddc9e75b216a27593c8fe99516abc38c9c411a93842a372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://44c03bba5a02d879c3b235cd65897fb30651871689c8f02b9526966817f79636\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44c03bba5a02d879c3b235cd65897fb30651871689c8f02b9526966817f79636\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:31Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.152521 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7962d88e183df5c4d91f5767f2007068ea2fdfd5c31d9d6ed609c2ab9ca26999\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:31Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.162314 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.162391 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.162408 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.162435 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.162451 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:31Z","lastTransitionTime":"2026-01-29T12:07:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.173066 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5m2jf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0203345d-1e9f-4cfe-bde7-90f87221d1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fdf3d79084d3e17b1f8cb0b285fd425280f34724c6a441104a2ec52b5217bfdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zzwn6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5m2jf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:31Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.185479 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-p6m5g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2412fa08-e643-4225-b494-eb999ea93fce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjjxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjjxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:23Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-p6m5g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:31Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.202863 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"703a6b9d-a15d-4ed3-b00b-db7bd5d42c61\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cab3ab80a8fddbc591cd07fdbed800f525bb8a1b5505bf818ebb59cbcce73003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7af8be0836a8ff7d08ff48d9487acdedd4b83127538d071d13b3215400c0f8de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d57a9cede640bdcec325175598b72dd00398436b730248728c7d9afd545b1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e86597c91848f265cd70410d57ac60eadba1d0322b9b897097366a41c93d8134\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T12:06:54Z\\\",\\\"message\\\":\\\"ed a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\"\\\\nI0129 12:06:54.190978 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 12:06:54.198044 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 12:06:54.198078 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 12:06:54.198139 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 12:06:54.198147 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 12:06:54.208089 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 12:06:54.208122 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208128 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208132 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 12:06:54.208135 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 12:06:54.208139 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 12:06:54.208143 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 12:06:54.208164 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0129 12:06:54.212380 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769688397\\\\\\\\\\\\\\\" (2026-01-29 12:06:37 +0000 UTC to 2026-02-28 12:06:38 +0000 UTC (now=2026-01-29 12:06:54.212317535 +0000 UTC))\\\\\\\"\\\\nF0129 12:06:54.212498 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e02d977e34888952623f4ced5e073d7bf1ff360f8deb435dc577d2ecf146cb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:33Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:31Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.217872 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:31Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.229208 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x8kzr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d753320a-61c2-4c0e-bd48-96d74b352114\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c7a667b7243fc36ff18cb4c017152fff3d5f4eb5e28e306ace44420cd722c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tgns4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x8kzr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:31Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.243882 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vwcjk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e921d361518102483679b5ff7387e27858e315d637efb0f18e06215fe00f70a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8b1c7640bd4881ba79f3471e309b2d1538aea8d0c8e01d3d7cfc3677d95725a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8b1c7640bd4881ba79f3471e309b2d1538aea8d0c8e01d3d7cfc3677d95725a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0ba51b6e2180da460faa3599850175db9d7410880ba0214425fa275803f36d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0ba51b6e2180da460faa3599850175db9d7410880ba0214425fa275803f36d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ed771b481178d6e12c669e122932ef25e1fe09db24c8f025f50a469400af184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ed771b481178d6e12c669e122932ef25e1fe09db24c8f025f50a469400af184\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1990423fefacfb9b390af88b9af343c1bb4c02014db6bf8226918a7be9f4ad5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1990423fefacfb9b390af88b9af343c1bb4c02014db6bf8226918a7be9f4ad5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vwcjk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:31Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.264846 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.264878 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:31 crc 
kubenswrapper[4753]: I0129 12:07:31.264886 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.264902 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.264913 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:31Z","lastTransitionTime":"2026-01-29T12:07:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.284200 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-27 04:58:10.577892826 +0000 UTC Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.367152 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.367188 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.367199 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.367216 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.367242 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:31Z","lastTransitionTime":"2026-01-29T12:07:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.472790 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.472838 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.472850 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.472870 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.472883 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:31Z","lastTransitionTime":"2026-01-29T12:07:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.576073 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.576121 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.576137 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.576160 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.576169 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:31Z","lastTransitionTime":"2026-01-29T12:07:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.679311 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.679367 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.679376 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.679397 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.679410 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:31Z","lastTransitionTime":"2026-01-29T12:07:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.784628 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.784708 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.784726 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.784786 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.784807 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:31Z","lastTransitionTime":"2026-01-29T12:07:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.888025 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.888398 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.888522 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.888758 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.888858 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:31Z","lastTransitionTime":"2026-01-29T12:07:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.992358 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.992411 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.992434 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.992463 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:31 crc kubenswrapper[4753]: I0129 12:07:31.992477 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:31Z","lastTransitionTime":"2026-01-29T12:07:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:32 crc kubenswrapper[4753]: I0129 12:07:32.095927 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:32 crc kubenswrapper[4753]: I0129 12:07:32.095966 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:32 crc kubenswrapper[4753]: I0129 12:07:32.095974 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:32 crc kubenswrapper[4753]: I0129 12:07:32.095989 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:32 crc kubenswrapper[4753]: I0129 12:07:32.095999 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:32Z","lastTransitionTime":"2026-01-29T12:07:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:32 crc kubenswrapper[4753]: I0129 12:07:32.198174 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:32 crc kubenswrapper[4753]: I0129 12:07:32.198243 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:32 crc kubenswrapper[4753]: I0129 12:07:32.198254 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:32 crc kubenswrapper[4753]: I0129 12:07:32.198270 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:32 crc kubenswrapper[4753]: I0129 12:07:32.198280 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:32Z","lastTransitionTime":"2026-01-29T12:07:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:32 crc kubenswrapper[4753]: I0129 12:07:32.284783 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-27 05:32:19.802176153 +0000 UTC Jan 29 12:07:32 crc kubenswrapper[4753]: I0129 12:07:32.301794 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:32 crc kubenswrapper[4753]: I0129 12:07:32.302115 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:32 crc kubenswrapper[4753]: I0129 12:07:32.302221 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:32 crc kubenswrapper[4753]: I0129 12:07:32.302376 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:32 crc kubenswrapper[4753]: I0129 12:07:32.302481 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:32Z","lastTransitionTime":"2026-01-29T12:07:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:32 crc kubenswrapper[4753]: I0129 12:07:32.405298 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:32 crc kubenswrapper[4753]: I0129 12:07:32.405591 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:32 crc kubenswrapper[4753]: I0129 12:07:32.405692 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:32 crc kubenswrapper[4753]: I0129 12:07:32.405779 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:32 crc kubenswrapper[4753]: I0129 12:07:32.405871 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:32Z","lastTransitionTime":"2026-01-29T12:07:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:32 crc kubenswrapper[4753]: I0129 12:07:32.508086 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:32 crc kubenswrapper[4753]: I0129 12:07:32.508121 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:32 crc kubenswrapper[4753]: I0129 12:07:32.508130 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:32 crc kubenswrapper[4753]: I0129 12:07:32.508147 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:32 crc kubenswrapper[4753]: I0129 12:07:32.508158 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:32Z","lastTransitionTime":"2026-01-29T12:07:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:32 crc kubenswrapper[4753]: I0129 12:07:32.610677 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:32 crc kubenswrapper[4753]: I0129 12:07:32.610733 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:32 crc kubenswrapper[4753]: I0129 12:07:32.610745 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:32 crc kubenswrapper[4753]: I0129 12:07:32.610766 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:32 crc kubenswrapper[4753]: I0129 12:07:32.610778 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:32Z","lastTransitionTime":"2026-01-29T12:07:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:32 crc kubenswrapper[4753]: I0129 12:07:32.714184 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:32 crc kubenswrapper[4753]: I0129 12:07:32.714264 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:32 crc kubenswrapper[4753]: I0129 12:07:32.714275 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:32 crc kubenswrapper[4753]: I0129 12:07:32.714296 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:32 crc kubenswrapper[4753]: I0129 12:07:32.714305 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:32Z","lastTransitionTime":"2026-01-29T12:07:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:32 crc kubenswrapper[4753]: I0129 12:07:32.818538 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:32 crc kubenswrapper[4753]: I0129 12:07:32.818587 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:32 crc kubenswrapper[4753]: I0129 12:07:32.818599 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:32 crc kubenswrapper[4753]: I0129 12:07:32.818623 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:32 crc kubenswrapper[4753]: I0129 12:07:32.818634 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:32Z","lastTransitionTime":"2026-01-29T12:07:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:32 crc kubenswrapper[4753]: I0129 12:07:32.888266 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 12:07:32 crc kubenswrapper[4753]: I0129 12:07:32.888318 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 12:07:32 crc kubenswrapper[4753]: I0129 12:07:32.888294 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 12:07:32 crc kubenswrapper[4753]: I0129 12:07:32.888276 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p6m5g" Jan 29 12:07:32 crc kubenswrapper[4753]: E0129 12:07:32.888579 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 12:07:32 crc kubenswrapper[4753]: E0129 12:07:32.888738 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 12:07:32 crc kubenswrapper[4753]: E0129 12:07:32.888861 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p6m5g" podUID="2412fa08-e643-4225-b494-eb999ea93fce" Jan 29 12:07:32 crc kubenswrapper[4753]: E0129 12:07:32.889110 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 12:07:32 crc kubenswrapper[4753]: I0129 12:07:32.922071 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:32 crc kubenswrapper[4753]: I0129 12:07:32.922131 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:32 crc kubenswrapper[4753]: I0129 12:07:32.922145 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:32 crc kubenswrapper[4753]: I0129 12:07:32.922167 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:32 crc kubenswrapper[4753]: I0129 12:07:32.922179 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:32Z","lastTransitionTime":"2026-01-29T12:07:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.025037 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.025420 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.025650 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.025822 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.025949 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:33Z","lastTransitionTime":"2026-01-29T12:07:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.128405 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.128720 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.128810 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.128955 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.129068 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:33Z","lastTransitionTime":"2026-01-29T12:07:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.233680 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.233733 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.233746 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.233777 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.233791 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:33Z","lastTransitionTime":"2026-01-29T12:07:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.285149 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-13 10:48:57.408916373 +0000 UTC Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.309932 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.309993 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.310004 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.310023 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.310034 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:33Z","lastTransitionTime":"2026-01-29T12:07:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:33 crc kubenswrapper[4753]: E0129 12:07:33.327149 4753 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:33Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:33Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7536a030-b66d-4df9-a1ec-9890f7ad99e7\\\",\\\"systemUUID\\\":\\\"ce89c1ce-259c-4b78-b348-3ef96afb6944\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:33Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.332288 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.332346 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
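[Annotation] The entry above records the failed node-status patch as "Error updating node status, will retry", and the same multi-kilobyte patch body reappears moments later with only timestamps changed: the kubelet retries the PATCH a small fixed number of times per status sync before giving up. A minimal sketch of that bounded-retry shape follows; the constant name and count mirror the upstream kubelet's nodeStatusUpdateRetry as an assumption, and the failing call is a stand-in, not the real client-go request.

package main

// Sketch of the bounded retry loop behind the repeated
// "Error updating node status, will retry" entries above.

import (
	"errors"
	"fmt"
)

// Assumption: mirrors the upstream kubelet constant of the same name.
const nodeStatusUpdateRetry = 5

// patchNodeStatus is a stand-in for the PATCH against the API server
// that the admission webhook is rejecting in the log.
func patchNodeStatus() error {
	return errors.New(`failed calling webhook "node.network-node-identity.openshift.io": ` +
		`tls: failed to verify certificate: x509: certificate has expired or is not yet valid`)
}

func updateNodeStatus() error {
	for i := 0; i < nodeStatusUpdateRetry; i++ {
		if err := patchNodeStatus(); err != nil {
			fmt.Printf("Error updating node status, will retry: %v\n", err)
			continue
		}
		return nil
	}
	return errors.New("update node status exceeds retry count")
}

func main() {
	if err := updateNodeStatus(); err != nil {
		fmt.Println(err)
	}
}

Because the webhook failure is persistent rather than transient, every retry in the window fails the same way, producing the near-identical blobs seen at 12:07:33.327149, 12:07:33.349740, and 12:07:33.370922.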
event="NodeHasNoDiskPressure" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.332358 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.332400 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.332414 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:33Z","lastTransitionTime":"2026-01-29T12:07:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:33 crc kubenswrapper[4753]: E0129 12:07:33.349740 4753 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:33Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:33Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7536a030-b66d-4df9-a1ec-9890f7ad99e7\\\",\\\"systemUUID\\\":\\\"ce89c1ce-259c-4b78-b348-3ef96afb6944\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:33Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.354257 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.354351 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
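[Annotation] Underlying the recurring Ready=False heartbeats, the container runtime reports NetworkReady=false until a CNI network configuration appears in /etc/kubernetes/cni/net.d/. The sketch below approximates that readiness condition by scanning the conf directory for the extensions CNI loads (.conf, .conflist, .json); the real check is performed by the container runtime via libcni, so treat this as an illustration, not the actual implementation.

package main

// Sketch of the readiness condition behind
// "no CNI configuration file in /etc/kubernetes/cni/net.d/":
// the network is considered ready only once at least one CNI config
// file exists in the conf dir.

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// cniConfigPresent reports whether dir holds any file with an extension
// that CNI loads as network configuration.
func cniConfigPresent(dir string) (bool, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return false, err
	}
	for _, e := range entries {
		if e.IsDir() {
			continue
		}
		switch strings.ToLower(filepath.Ext(e.Name())) {
		case ".conf", ".conflist", ".json":
			return true, nil
		}
	}
	return false, nil
}

func main() {
	ready, err := cniConfigPresent("/etc/kubernetes/cni/net.d")
	if err != nil || !ready {
		fmt.Printf("NetworkReady=false: no CNI configuration file (err=%v)\n", err)
		return
	}
	fmt.Println("NetworkReady=true")
}

On this cluster the file is written by the multus daemonset once the default network is up, so the condition clears on its own after the network operator recovers; here it cannot recover because its own webhook certificate is expired.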
event="NodeHasNoDiskPressure" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.354365 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.354388 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.354403 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:33Z","lastTransitionTime":"2026-01-29T12:07:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:33 crc kubenswrapper[4753]: E0129 12:07:33.370922 4753 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:33Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:33Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7536a030-b66d-4df9-a1ec-9890f7ad99e7\\\",\\\"systemUUID\\\":\\\"ce89c1ce-259c-4b78-b348-3ef96afb6944\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:33Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.375637 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.375695 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.375712 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.375736 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.375756 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:33Z","lastTransitionTime":"2026-01-29T12:07:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:33 crc kubenswrapper[4753]: E0129 12:07:33.388902 4753 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:33Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:33Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7536a030-b66d-4df9-a1ec-9890f7ad99e7\\\",\\\"systemUUID\\\":\\\"ce89c1ce-259c-4b78-b348-3ef96afb6944\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:33Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.393562 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.393605 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.393615 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.393634 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.393673 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:33Z","lastTransitionTime":"2026-01-29T12:07:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:33 crc kubenswrapper[4753]: E0129 12:07:33.407027 4753 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:33Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:33Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7536a030-b66d-4df9-a1ec-9890f7ad99e7\\\",\\\"systemUUID\\\":\\\"ce89c1ce-259c-4b78-b348-3ef96afb6944\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:33Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:33 crc kubenswrapper[4753]: E0129 12:07:33.407202 4753 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.409647 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.409748 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.409763 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.409786 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.409803 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:33Z","lastTransitionTime":"2026-01-29T12:07:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.514155 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.514348 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.514369 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.514395 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.514418 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:33Z","lastTransitionTime":"2026-01-29T12:07:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.619375 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.619472 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.619506 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.619563 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.619595 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:33Z","lastTransitionTime":"2026-01-29T12:07:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.722629 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.722672 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.722684 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.722713 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.722722 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:33Z","lastTransitionTime":"2026-01-29T12:07:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.825686 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.825733 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.825742 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.825763 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.825774 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:33Z","lastTransitionTime":"2026-01-29T12:07:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.928801 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.928853 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.928880 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.928902 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:33 crc kubenswrapper[4753]: I0129 12:07:33.928915 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:33Z","lastTransitionTime":"2026-01-29T12:07:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:34 crc kubenswrapper[4753]: I0129 12:07:34.031718 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:34 crc kubenswrapper[4753]: I0129 12:07:34.031762 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:34 crc kubenswrapper[4753]: I0129 12:07:34.031774 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:34 crc kubenswrapper[4753]: I0129 12:07:34.031791 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:34 crc kubenswrapper[4753]: I0129 12:07:34.031801 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:34Z","lastTransitionTime":"2026-01-29T12:07:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:34 crc kubenswrapper[4753]: I0129 12:07:34.134923 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:34 crc kubenswrapper[4753]: I0129 12:07:34.134968 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:34 crc kubenswrapper[4753]: I0129 12:07:34.134980 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:34 crc kubenswrapper[4753]: I0129 12:07:34.134999 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:34 crc kubenswrapper[4753]: I0129 12:07:34.135009 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:34Z","lastTransitionTime":"2026-01-29T12:07:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:34 crc kubenswrapper[4753]: I0129 12:07:34.237901 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:34 crc kubenswrapper[4753]: I0129 12:07:34.237938 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:34 crc kubenswrapper[4753]: I0129 12:07:34.237947 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:34 crc kubenswrapper[4753]: I0129 12:07:34.237962 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:34 crc kubenswrapper[4753]: I0129 12:07:34.237973 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:34Z","lastTransitionTime":"2026-01-29T12:07:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:34 crc kubenswrapper[4753]: I0129 12:07:34.285613 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-31 11:10:17.392055312 +0000 UTC Jan 29 12:07:34 crc kubenswrapper[4753]: I0129 12:07:34.340367 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:34 crc kubenswrapper[4753]: I0129 12:07:34.340429 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:34 crc kubenswrapper[4753]: I0129 12:07:34.340440 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:34 crc kubenswrapper[4753]: I0129 12:07:34.340457 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:34 crc kubenswrapper[4753]: I0129 12:07:34.340468 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:34Z","lastTransitionTime":"2026-01-29T12:07:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:34 crc kubenswrapper[4753]: I0129 12:07:34.443579 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:34 crc kubenswrapper[4753]: I0129 12:07:34.443654 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:34 crc kubenswrapper[4753]: I0129 12:07:34.443666 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:34 crc kubenswrapper[4753]: I0129 12:07:34.443704 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:34 crc kubenswrapper[4753]: I0129 12:07:34.443716 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:34Z","lastTransitionTime":"2026-01-29T12:07:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:34 crc kubenswrapper[4753]: I0129 12:07:34.546380 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:34 crc kubenswrapper[4753]: I0129 12:07:34.546434 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:34 crc kubenswrapper[4753]: I0129 12:07:34.546447 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:34 crc kubenswrapper[4753]: I0129 12:07:34.546463 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:34 crc kubenswrapper[4753]: I0129 12:07:34.546472 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:34Z","lastTransitionTime":"2026-01-29T12:07:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:34 crc kubenswrapper[4753]: I0129 12:07:34.649030 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:34 crc kubenswrapper[4753]: I0129 12:07:34.649072 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:34 crc kubenswrapper[4753]: I0129 12:07:34.649081 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:34 crc kubenswrapper[4753]: I0129 12:07:34.649097 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:34 crc kubenswrapper[4753]: I0129 12:07:34.649134 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:34Z","lastTransitionTime":"2026-01-29T12:07:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:34 crc kubenswrapper[4753]: I0129 12:07:34.757345 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:34 crc kubenswrapper[4753]: I0129 12:07:34.757587 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:34 crc kubenswrapper[4753]: I0129 12:07:34.757601 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:34 crc kubenswrapper[4753]: I0129 12:07:34.757637 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:34 crc kubenswrapper[4753]: I0129 12:07:34.757648 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:34Z","lastTransitionTime":"2026-01-29T12:07:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:34 crc kubenswrapper[4753]: I0129 12:07:34.863377 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:34 crc kubenswrapper[4753]: I0129 12:07:34.863437 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:34 crc kubenswrapper[4753]: I0129 12:07:34.863451 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:34 crc kubenswrapper[4753]: I0129 12:07:34.863472 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:34 crc kubenswrapper[4753]: I0129 12:07:34.863484 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:34Z","lastTransitionTime":"2026-01-29T12:07:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:34 crc kubenswrapper[4753]: I0129 12:07:34.887664 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 12:07:34 crc kubenswrapper[4753]: I0129 12:07:34.887727 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 12:07:34 crc kubenswrapper[4753]: I0129 12:07:34.887836 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 12:07:34 crc kubenswrapper[4753]: I0129 12:07:34.887864 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p6m5g" Jan 29 12:07:34 crc kubenswrapper[4753]: E0129 12:07:34.887911 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 12:07:34 crc kubenswrapper[4753]: E0129 12:07:34.888183 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 12:07:34 crc kubenswrapper[4753]: E0129 12:07:34.888106 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-p6m5g" podUID="2412fa08-e643-4225-b494-eb999ea93fce" Jan 29 12:07:34 crc kubenswrapper[4753]: E0129 12:07:34.888449 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 12:07:34 crc kubenswrapper[4753]: I0129 12:07:34.965831 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:34 crc kubenswrapper[4753]: I0129 12:07:34.965872 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:34 crc kubenswrapper[4753]: I0129 12:07:34.965881 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:34 crc kubenswrapper[4753]: I0129 12:07:34.965899 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:34 crc kubenswrapper[4753]: I0129 12:07:34.965908 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:34Z","lastTransitionTime":"2026-01-29T12:07:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:35 crc kubenswrapper[4753]: I0129 12:07:35.068470 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:35 crc kubenswrapper[4753]: I0129 12:07:35.068514 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:35 crc kubenswrapper[4753]: I0129 12:07:35.068526 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:35 crc kubenswrapper[4753]: I0129 12:07:35.068546 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:35 crc kubenswrapper[4753]: I0129 12:07:35.068559 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:35Z","lastTransitionTime":"2026-01-29T12:07:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 29 12:07:35 crc kubenswrapper[4753]: I0129 12:07:35.170899 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:35 crc kubenswrapper[4753]: I0129 12:07:35.170940 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:35 crc kubenswrapper[4753]: I0129 12:07:35.170953 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:35 crc kubenswrapper[4753]: I0129 12:07:35.170970 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:35 crc kubenswrapper[4753]: I0129 12:07:35.170982 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:35Z","lastTransitionTime":"2026-01-29T12:07:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:35 crc kubenswrapper[4753]: I0129 12:07:35.274687 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:35 crc kubenswrapper[4753]: I0129 12:07:35.275041 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:35 crc kubenswrapper[4753]: I0129 12:07:35.275162 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:35 crc kubenswrapper[4753]: I0129 12:07:35.275324 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:35 crc kubenswrapper[4753]: I0129 12:07:35.275465 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:35Z","lastTransitionTime":"2026-01-29T12:07:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:35 crc kubenswrapper[4753]: I0129 12:07:35.285997 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-23 18:11:26.132745604 +0000 UTC
Jan 29 12:07:35 crc kubenswrapper[4753]: I0129 12:07:35.379419 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:35 crc kubenswrapper[4753]: I0129 12:07:35.379480 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:35 crc kubenswrapper[4753]: I0129 12:07:35.379497 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:35 crc kubenswrapper[4753]: I0129 12:07:35.379526 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:35 crc kubenswrapper[4753]: I0129 12:07:35.379542 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:35Z","lastTransitionTime":"2026-01-29T12:07:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:35 crc kubenswrapper[4753]: I0129 12:07:35.483074 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:35 crc kubenswrapper[4753]: I0129 12:07:35.483132 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:35 crc kubenswrapper[4753]: I0129 12:07:35.483149 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:35 crc kubenswrapper[4753]: I0129 12:07:35.483172 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:35 crc kubenswrapper[4753]: I0129 12:07:35.483187 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:35Z","lastTransitionTime":"2026-01-29T12:07:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:35 crc kubenswrapper[4753]: I0129 12:07:35.588080 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:35 crc kubenswrapper[4753]: I0129 12:07:35.588132 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:35 crc kubenswrapper[4753]: I0129 12:07:35.588143 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:35 crc kubenswrapper[4753]: I0129 12:07:35.588162 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:35 crc kubenswrapper[4753]: I0129 12:07:35.588175 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:35Z","lastTransitionTime":"2026-01-29T12:07:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:35 crc kubenswrapper[4753]: I0129 12:07:35.690980 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:35 crc kubenswrapper[4753]: I0129 12:07:35.691043 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:35 crc kubenswrapper[4753]: I0129 12:07:35.691055 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:35 crc kubenswrapper[4753]: I0129 12:07:35.691075 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:35 crc kubenswrapper[4753]: I0129 12:07:35.691088 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:35Z","lastTransitionTime":"2026-01-29T12:07:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:35 crc kubenswrapper[4753]: I0129 12:07:35.793909 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:35 crc kubenswrapper[4753]: I0129 12:07:35.793944 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:35 crc kubenswrapper[4753]: I0129 12:07:35.793955 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:35 crc kubenswrapper[4753]: I0129 12:07:35.793972 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:35 crc kubenswrapper[4753]: I0129 12:07:35.793985 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:35Z","lastTransitionTime":"2026-01-29T12:07:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:35 crc kubenswrapper[4753]: I0129 12:07:35.896646 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:35 crc kubenswrapper[4753]: I0129 12:07:35.896717 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:35 crc kubenswrapper[4753]: I0129 12:07:35.896744 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:35 crc kubenswrapper[4753]: I0129 12:07:35.896780 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:35 crc kubenswrapper[4753]: I0129 12:07:35.896797 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:35Z","lastTransitionTime":"2026-01-29T12:07:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:36 crc kubenswrapper[4753]: I0129 12:07:36.000089 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:36 crc kubenswrapper[4753]: I0129 12:07:36.000400 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:36 crc kubenswrapper[4753]: I0129 12:07:36.000508 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:36 crc kubenswrapper[4753]: I0129 12:07:36.000596 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:36 crc kubenswrapper[4753]: I0129 12:07:36.000681 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:36Z","lastTransitionTime":"2026-01-29T12:07:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:36 crc kubenswrapper[4753]: I0129 12:07:36.103663 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:36 crc kubenswrapper[4753]: I0129 12:07:36.103709 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:36 crc kubenswrapper[4753]: I0129 12:07:36.103721 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:36 crc kubenswrapper[4753]: I0129 12:07:36.103795 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:36 crc kubenswrapper[4753]: I0129 12:07:36.103824 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:36Z","lastTransitionTime":"2026-01-29T12:07:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:36 crc kubenswrapper[4753]: I0129 12:07:36.207189 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:36 crc kubenswrapper[4753]: I0129 12:07:36.207268 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:36 crc kubenswrapper[4753]: I0129 12:07:36.207281 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:36 crc kubenswrapper[4753]: I0129 12:07:36.207307 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:36 crc kubenswrapper[4753]: I0129 12:07:36.207321 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:36Z","lastTransitionTime":"2026-01-29T12:07:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:36 crc kubenswrapper[4753]: I0129 12:07:36.596550 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-12 19:55:17.29723252 +0000 UTC
Jan 29 12:07:36 crc kubenswrapper[4753]: I0129 12:07:36.600929 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:36 crc kubenswrapper[4753]: I0129 12:07:36.600984 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:36 crc kubenswrapper[4753]: I0129 12:07:36.600998 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:36 crc kubenswrapper[4753]: I0129 12:07:36.601017 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:36 crc kubenswrapper[4753]: I0129 12:07:36.601028 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:36Z","lastTransitionTime":"2026-01-29T12:07:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:36 crc kubenswrapper[4753]: I0129 12:07:36.703826 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:36 crc kubenswrapper[4753]: I0129 12:07:36.703865 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:36 crc kubenswrapper[4753]: I0129 12:07:36.703875 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:36 crc kubenswrapper[4753]: I0129 12:07:36.703891 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:36 crc kubenswrapper[4753]: I0129 12:07:36.703901 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:36Z","lastTransitionTime":"2026-01-29T12:07:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:36 crc kubenswrapper[4753]: I0129 12:07:36.806503 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:36 crc kubenswrapper[4753]: I0129 12:07:36.806546 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:36 crc kubenswrapper[4753]: I0129 12:07:36.806558 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:36 crc kubenswrapper[4753]: I0129 12:07:36.806577 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:36 crc kubenswrapper[4753]: I0129 12:07:36.806589 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:36Z","lastTransitionTime":"2026-01-29T12:07:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:36 crc kubenswrapper[4753]: I0129 12:07:36.888131 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 29 12:07:36 crc kubenswrapper[4753]: I0129 12:07:36.888216 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p6m5g"
Jan 29 12:07:36 crc kubenswrapper[4753]: I0129 12:07:36.888150 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 29 12:07:36 crc kubenswrapper[4753]: I0129 12:07:36.888245 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 29 12:07:36 crc kubenswrapper[4753]: E0129 12:07:36.888346 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 29 12:07:36 crc kubenswrapper[4753]: E0129 12:07:36.888457 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 29 12:07:36 crc kubenswrapper[4753]: E0129 12:07:36.888567 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 29 12:07:36 crc kubenswrapper[4753]: E0129 12:07:36.888634 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p6m5g" podUID="2412fa08-e643-4225-b494-eb999ea93fce"
Jan 29 12:07:36 crc kubenswrapper[4753]: I0129 12:07:36.909436 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:36 crc kubenswrapper[4753]: I0129 12:07:36.909479 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:36 crc kubenswrapper[4753]: I0129 12:07:36.909489 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:36 crc kubenswrapper[4753]: I0129 12:07:36.909509 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:36 crc kubenswrapper[4753]: I0129 12:07:36.909519 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:36Z","lastTransitionTime":"2026-01-29T12:07:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:37 crc kubenswrapper[4753]: I0129 12:07:37.012494 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:37 crc kubenswrapper[4753]: I0129 12:07:37.012543 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:37 crc kubenswrapper[4753]: I0129 12:07:37.012555 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:37 crc kubenswrapper[4753]: I0129 12:07:37.012571 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:37 crc kubenswrapper[4753]: I0129 12:07:37.012592 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:37Z","lastTransitionTime":"2026-01-29T12:07:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:37 crc kubenswrapper[4753]: I0129 12:07:37.115556 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:37 crc kubenswrapper[4753]: I0129 12:07:37.115601 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:37 crc kubenswrapper[4753]: I0129 12:07:37.115612 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:37 crc kubenswrapper[4753]: I0129 12:07:37.115630 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:37 crc kubenswrapper[4753]: I0129 12:07:37.115641 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:37Z","lastTransitionTime":"2026-01-29T12:07:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:37 crc kubenswrapper[4753]: I0129 12:07:37.218872 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:37 crc kubenswrapper[4753]: I0129 12:07:37.218930 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:37 crc kubenswrapper[4753]: I0129 12:07:37.218941 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:37 crc kubenswrapper[4753]: I0129 12:07:37.218961 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:37 crc kubenswrapper[4753]: I0129 12:07:37.218974 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:37Z","lastTransitionTime":"2026-01-29T12:07:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:37 crc kubenswrapper[4753]: I0129 12:07:37.322647 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:37 crc kubenswrapper[4753]: I0129 12:07:37.322736 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:37 crc kubenswrapper[4753]: I0129 12:07:37.322771 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:37 crc kubenswrapper[4753]: I0129 12:07:37.322812 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:37 crc kubenswrapper[4753]: I0129 12:07:37.322825 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:37Z","lastTransitionTime":"2026-01-29T12:07:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:37 crc kubenswrapper[4753]: I0129 12:07:37.520364 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:37 crc kubenswrapper[4753]: I0129 12:07:37.520417 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:37 crc kubenswrapper[4753]: I0129 12:07:37.520431 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:37 crc kubenswrapper[4753]: I0129 12:07:37.520452 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:37 crc kubenswrapper[4753]: I0129 12:07:37.520464 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:37Z","lastTransitionTime":"2026-01-29T12:07:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:37 crc kubenswrapper[4753]: I0129 12:07:37.597040 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-13 18:22:06.541552353 +0000 UTC
Jan 29 12:07:37 crc kubenswrapper[4753]: I0129 12:07:37.623012 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:37 crc kubenswrapper[4753]: I0129 12:07:37.623047 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:37 crc kubenswrapper[4753]: I0129 12:07:37.623056 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:37 crc kubenswrapper[4753]: I0129 12:07:37.623075 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:37 crc kubenswrapper[4753]: I0129 12:07:37.623085 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:37Z","lastTransitionTime":"2026-01-29T12:07:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:37 crc kubenswrapper[4753]: I0129 12:07:37.726058 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:37 crc kubenswrapper[4753]: I0129 12:07:37.726108 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:37 crc kubenswrapper[4753]: I0129 12:07:37.726120 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:37 crc kubenswrapper[4753]: I0129 12:07:37.726139 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:37 crc kubenswrapper[4753]: I0129 12:07:37.726154 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:37Z","lastTransitionTime":"2026-01-29T12:07:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:37 crc kubenswrapper[4753]: I0129 12:07:37.828967 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:37 crc kubenswrapper[4753]: I0129 12:07:37.829023 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:37 crc kubenswrapper[4753]: I0129 12:07:37.829043 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:37 crc kubenswrapper[4753]: I0129 12:07:37.829067 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:37 crc kubenswrapper[4753]: I0129 12:07:37.829083 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:37Z","lastTransitionTime":"2026-01-29T12:07:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:37 crc kubenswrapper[4753]: I0129 12:07:37.911478 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d7ccf10988fd83d31d42be6f82403067a0cc50313c4d5dcddfb528507efffa4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:37Z is after 2025-08-24T17:21:41Z"
Jan 29 12:07:37 crc kubenswrapper[4753]: I0129 12:07:37.931279 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:37 crc kubenswrapper[4753]: I0129 12:07:37.931310 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:37 crc kubenswrapper[4753]: I0129 12:07:37.931318 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:37 crc kubenswrapper[4753]: I0129 12:07:37.931332 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:37 crc kubenswrapper[4753]: I0129 12:07:37.931343 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:37Z","lastTransitionTime":"2026-01-29T12:07:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:37 crc kubenswrapper[4753]: I0129 12:07:37.938568 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80bec2ab-0a88-4818-9339-760edda3b07e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f15016e29f1fc14f51b4d01e772071333fd8a885cacc1804b3a03dbcfc93836\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e6a7d8df812546f17b3e026005c5062086e97e3fa364b3fa22cefec025e4eba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f71859413676dab4a435d43c11ad7cba315a12de744c3c8b161273f6ad36580c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0ec2a21ec299b5eed363668f39a270a8b03ea79b9de3dd23bda0c30579c3de3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91c1df8b6763320498420463a3841f0bfa4fcf753c355f549c3c1e93bf5206a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://05d1e99596530f467ecdae51aec8db191ac045cd85e121fc9835e2f3688f6ae7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://355e2c7c84bbb5dcfd75734a95868268a117bb2fad6db6fc7c23ab6afbf7fb58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://355e2c7c84bbb5dcfd75734a95868268a117bb2fad6db6fc7c23ab6afbf7fb58\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T12:07:26Z\\\",\\\"message\\\":\\\"ift-kube-scheduler/scheduler_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.169:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {39432221-5995-412b-967b-35e1a9405ec7}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0129 12:07:26.279603 6197 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:26Z is after 2025-08-24T17:21:41Z]\\\\nI0129 12:07:26.279616 6197 obj_retry.go:303] Retry object setup: *v1.Pod openshift-i\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:25Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-nzkvz_openshift-ovn-kubernetes(80bec2ab-0a88-4818-9339-760edda3b07e)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e40e5b13dfdc135ea09253d466bacdfc5c2ac3d28eabc76d833d71ac300905c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-nzkvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:37Z is after 2025-08-24T17:21:41Z"
Jan 29 12:07:37 crc kubenswrapper[4753]: I0129 12:07:37.957132 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c93f64b-e5af-4d6b-83e9-7fdfeb8548ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a9ec3e416b3b385c71b2fa28c0215fcb04b708e33fec2d85fdf0a75b848b027\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4fd3a920ea0c98edb2c8ad37d2a1a5d1ed50de5bc8c3e2f4d34c876b2cab54f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8dea62586ba5ac5a8ddc9e75b216a27593c8fe99516abc38c9c411a93842a372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://44c03bba5a02d879c3b235cd65897fb30651871689c8f02b9526966817f79636\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44c03bba5a02d879c3b235cd65897fb30651871689c8f02b9526966817f79636\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:37Z is after 2025-08-24T17:21:41Z"
Jan 29 12:07:37 crc kubenswrapper[4753]: I0129 12:07:37.974427 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7962d88e183df5c4d91f5767f2007068ea2fdfd5c31d9d6ed609c2ab9ca26999\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:37Z is after 2025-08-24T17:21:41Z"
Jan 29 12:07:37 crc kubenswrapper[4753]: I0129 12:07:37.989409 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5m2jf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0203345d-1e9f-4cfe-bde7-90f87221d1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fdf3d79084d3e17b1f8cb0b285fd425280f34724c6a441104a2ec52b5217bfdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zzwn6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5m2jf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:37Z is after 2025-08-24T17:21:41Z"
Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.006944 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-p6m5g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2412fa08-e643-4225-b494-eb999ea93fce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjjxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjjxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:23Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-p6m5g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:38Z is after 2025-08-24T17:21:41Z"
Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.029528 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"703a6b9d-a15d-4ed3-b00b-db7bd5d42c61\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cab3ab80a8fddbc591cd07fdbed800f525bb8a1b5505bf818ebb59cbcce73003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7af8be0836a8ff7d08ff48d9487acdedd4b83127538d071d13b3215400c0f8de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d57a9cede640bdcec325175598b72dd00398436b730248728c7d9afd545b1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e86597c91848f265cd70410d57ac60eadba1d0322b9b897097366a41c93d8134\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T12:06:54Z\\\",\\\"message\\\":\\\"ed a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\"\\\\nI0129 12:06:54.190978 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 12:06:54.198044 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 12:06:54.198078 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 12:06:54.198139 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 12:06:54.198147 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 12:06:54.208089 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 12:06:54.208122 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208128 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208132 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 12:06:54.208135 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 12:06:54.208139 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 12:06:54.208143 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 12:06:54.208164 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0129 12:06:54.212380 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769688397\\\\\\\\\\\\\\\" (2026-01-29 12:06:37 +0000 UTC to 2026-02-28 12:06:38 +0000 UTC (now=2026-01-29 12:06:54.212317535 +0000 UTC))\\\\\\\"\\\\nF0129 12:06:54.212498 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e02d977e34888952623f4ced5e073d7bf1ff360f8deb435dc577d2ecf146cb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:33Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:38Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.033555 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.033615 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.033630 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.033663 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.033677 4753 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:38Z","lastTransitionTime":"2026-01-29T12:07:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.045296 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:38Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.060567 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x8kzr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d753320a-61c2-4c0e-bd48-96d74b352114\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c7a667b7243fc36ff18cb4c017152fff3d5f4eb5e28e306ace44420cd722c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tgns4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x8kzr\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:38Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.078845 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vwcjk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e921d361518102483679b5ff7387e27858e315d637efb0f18e06215fe00f70a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8b1c7640bd4881ba79f3471e309b2d1538aea8d0c8e01d3d7cfc3677d95725a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8b1c7640bd4881ba79f3471e309b2d1538aea8d0c8e01d3d7cfc3677d95725a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0ba51b6e2180da460faa3599850175db9d7410880ba0214425fa275803f36d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0ba51b6e2180da460faa3599850175db9d7410880ba0214425fa275803f36d3\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2026-01-29T12:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ed771b481178d6e12c669e122932ef25e1fe09db24c8f025f50a469400af184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ed771b481178d6e12c669e122932ef25e1fe09db24c8f025f50a469400af184\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1990423fefacfb9b390af88b9af343c1bb4c02014db6bf8226918a7be9f4ad5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1990423fefacfb9b390af88b9af343c1bb4c02014db6bf8226918a7be9f4ad5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vwcjk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-01-29T12:07:38Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.096338 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rnbz9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b372210b-6e1b-4a80-b379-7c1d570712f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://346c36936603f4535d5ad3e6b60e451b05b4d376e2531c813f0c900c92caf5fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqzp6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\
\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rnbz9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:38Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.111289 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9dtck" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"132de771-c8b4-403f-8916-8b453e7c6fc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30aac9e31bfe76eba96a50a30b03419b0b23ddd888485d609f37b84af28b8d3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gvjpd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://829af7b1f520503a0f36c60dd12b25d45c3831ba0cbb61f4f0a80582cb46da1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-
access-gvjpd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-9dtck\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:38Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.127668 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b108c546-a8c1-4bf9-814b-bfb5871b3013\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9842d9d6fed8c87bc20835964d3271b0be3117fedc658e6ef028db422d1ad478\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4fd4169036d3a646c9e5e4bfa8d86a92c557c3d8f77608ae5308dd0b8c45517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b4a6bbb431e321e54013c96e3cca1537283cbdf65d89a9
78780550d603663fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://31bd7db7bddd3e16cb455f7af9fd3a9bca49919c8f1ffb676dec83c543ba7763\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:38Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.136746 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.136788 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.136800 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.136817 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.136827 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:38Z","lastTransitionTime":"2026-01-29T12:07:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.145559 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:38Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.163626 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:38Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.180551 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966eb8e59f82bacf20ebfa1b1d5bf9dc0a9cdb3b430ae83ed9bde106eb3fa521\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea7c92ec02a9cf6ec5fbd8efe22a066dfef9b48f042137964c5dd49578c99079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imag
eID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:38Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.194240 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0310995-a7c7-47c3-ae6c-05daaaba92a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e56ac830346c0f85227bcebc9b76c2af8646767d4678934985d7b1f5846dcb82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8391a68625fc3d8fdbc4f16b0b7b25e359798d08ce65ad8f482722910873418a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea17
7225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7c24x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:38Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.239976 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.240042 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.240054 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.240072 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.240089 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:38Z","lastTransitionTime":"2026-01-29T12:07:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.342337 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.342382 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.342393 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.342410 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.342422 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:38Z","lastTransitionTime":"2026-01-29T12:07:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.445069 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.445119 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.445131 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.445151 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.445161 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:38Z","lastTransitionTime":"2026-01-29T12:07:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.548781 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.548855 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.548874 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.548900 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.548915 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:38Z","lastTransitionTime":"2026-01-29T12:07:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.597955 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-19 07:43:35.145011636 +0000 UTC Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.651701 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.651744 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.651754 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.651771 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.651780 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:38Z","lastTransitionTime":"2026-01-29T12:07:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.754741 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.754792 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.754805 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.754826 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.754837 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:38Z","lastTransitionTime":"2026-01-29T12:07:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.857925 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.857972 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.857986 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.858006 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.858017 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:38Z","lastTransitionTime":"2026-01-29T12:07:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.888735 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.888800 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p6m5g" Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.888851 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.888748 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 12:07:38 crc kubenswrapper[4753]: E0129 12:07:38.888940 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 12:07:38 crc kubenswrapper[4753]: E0129 12:07:38.889122 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 12:07:38 crc kubenswrapper[4753]: E0129 12:07:38.889213 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-p6m5g" podUID="2412fa08-e643-4225-b494-eb999ea93fce" Jan 29 12:07:38 crc kubenswrapper[4753]: E0129 12:07:38.889312 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.961384 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.961442 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.961452 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.961474 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:38 crc kubenswrapper[4753]: I0129 12:07:38.961488 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:38Z","lastTransitionTime":"2026-01-29T12:07:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:39 crc kubenswrapper[4753]: I0129 12:07:39.064312 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:39 crc kubenswrapper[4753]: I0129 12:07:39.064365 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:39 crc kubenswrapper[4753]: I0129 12:07:39.064376 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:39 crc kubenswrapper[4753]: I0129 12:07:39.064393 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:39 crc kubenswrapper[4753]: I0129 12:07:39.064404 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:39Z","lastTransitionTime":"2026-01-29T12:07:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:39 crc kubenswrapper[4753]: I0129 12:07:39.137325 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2412fa08-e643-4225-b494-eb999ea93fce-metrics-certs\") pod \"network-metrics-daemon-p6m5g\" (UID: \"2412fa08-e643-4225-b494-eb999ea93fce\") " pod="openshift-multus/network-metrics-daemon-p6m5g" Jan 29 12:07:39 crc kubenswrapper[4753]: E0129 12:07:39.137724 4753 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 29 12:07:39 crc kubenswrapper[4753]: E0129 12:07:39.138449 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2412fa08-e643-4225-b494-eb999ea93fce-metrics-certs podName:2412fa08-e643-4225-b494-eb999ea93fce nodeName:}" failed. No retries permitted until 2026-01-29 12:07:55.138371059 +0000 UTC m=+89.390452514 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/2412fa08-e643-4225-b494-eb999ea93fce-metrics-certs") pod "network-metrics-daemon-p6m5g" (UID: "2412fa08-e643-4225-b494-eb999ea93fce") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 29 12:07:39 crc kubenswrapper[4753]: I0129 12:07:39.167194 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:39 crc kubenswrapper[4753]: I0129 12:07:39.167310 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:39 crc kubenswrapper[4753]: I0129 12:07:39.167335 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:39 crc kubenswrapper[4753]: I0129 12:07:39.167381 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:39 crc kubenswrapper[4753]: I0129 12:07:39.167394 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:39Z","lastTransitionTime":"2026-01-29T12:07:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:39 crc kubenswrapper[4753]: I0129 12:07:39.270525 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:39 crc kubenswrapper[4753]: I0129 12:07:39.271175 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:39 crc kubenswrapper[4753]: I0129 12:07:39.271324 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:39 crc kubenswrapper[4753]: I0129 12:07:39.271425 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:39 crc kubenswrapper[4753]: I0129 12:07:39.271499 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:39Z","lastTransitionTime":"2026-01-29T12:07:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:39 crc kubenswrapper[4753]: I0129 12:07:39.511575 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:39 crc kubenswrapper[4753]: I0129 12:07:39.511600 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:39 crc kubenswrapper[4753]: I0129 12:07:39.511608 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:39 crc kubenswrapper[4753]: I0129 12:07:39.511623 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:39 crc kubenswrapper[4753]: I0129 12:07:39.511633 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:39Z","lastTransitionTime":"2026-01-29T12:07:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:39 crc kubenswrapper[4753]: I0129 12:07:39.598534 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-05 17:47:43.419243138 +0000 UTC Jan 29 12:07:39 crc kubenswrapper[4753]: I0129 12:07:39.614628 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:39 crc kubenswrapper[4753]: I0129 12:07:39.614674 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:39 crc kubenswrapper[4753]: I0129 12:07:39.614687 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:39 crc kubenswrapper[4753]: I0129 12:07:39.614706 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:39 crc kubenswrapper[4753]: I0129 12:07:39.614718 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:39Z","lastTransitionTime":"2026-01-29T12:07:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:39 crc kubenswrapper[4753]: I0129 12:07:39.717300 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:39 crc kubenswrapper[4753]: I0129 12:07:39.717349 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:39 crc kubenswrapper[4753]: I0129 12:07:39.717361 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:39 crc kubenswrapper[4753]: I0129 12:07:39.717380 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:39 crc kubenswrapper[4753]: I0129 12:07:39.717391 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:39Z","lastTransitionTime":"2026-01-29T12:07:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:39 crc kubenswrapper[4753]: I0129 12:07:39.820834 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:39 crc kubenswrapper[4753]: I0129 12:07:39.820895 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:39 crc kubenswrapper[4753]: I0129 12:07:39.820910 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:39 crc kubenswrapper[4753]: I0129 12:07:39.820934 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:39 crc kubenswrapper[4753]: I0129 12:07:39.820948 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:39Z","lastTransitionTime":"2026-01-29T12:07:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:39 crc kubenswrapper[4753]: I0129 12:07:39.924211 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:39 crc kubenswrapper[4753]: I0129 12:07:39.924272 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:39 crc kubenswrapper[4753]: I0129 12:07:39.924284 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:39 crc kubenswrapper[4753]: I0129 12:07:39.924306 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:39 crc kubenswrapper[4753]: I0129 12:07:39.924317 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:39Z","lastTransitionTime":"2026-01-29T12:07:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:40 crc kubenswrapper[4753]: I0129 12:07:40.027861 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:40 crc kubenswrapper[4753]: I0129 12:07:40.027921 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:40 crc kubenswrapper[4753]: I0129 12:07:40.027936 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:40 crc kubenswrapper[4753]: I0129 12:07:40.027957 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:40 crc kubenswrapper[4753]: I0129 12:07:40.027970 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:40Z","lastTransitionTime":"2026-01-29T12:07:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:40 crc kubenswrapper[4753]: I0129 12:07:40.131106 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:40 crc kubenswrapper[4753]: I0129 12:07:40.131510 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:40 crc kubenswrapper[4753]: I0129 12:07:40.131595 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:40 crc kubenswrapper[4753]: I0129 12:07:40.131699 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:40 crc kubenswrapper[4753]: I0129 12:07:40.131780 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:40Z","lastTransitionTime":"2026-01-29T12:07:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:40 crc kubenswrapper[4753]: I0129 12:07:40.338977 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:40 crc kubenswrapper[4753]: I0129 12:07:40.339025 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:40 crc kubenswrapper[4753]: I0129 12:07:40.339037 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:40 crc kubenswrapper[4753]: I0129 12:07:40.339057 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:40 crc kubenswrapper[4753]: I0129 12:07:40.339069 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:40Z","lastTransitionTime":"2026-01-29T12:07:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:40 crc kubenswrapper[4753]: I0129 12:07:40.442388 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:40 crc kubenswrapper[4753]: I0129 12:07:40.442439 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:40 crc kubenswrapper[4753]: I0129 12:07:40.442450 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:40 crc kubenswrapper[4753]: I0129 12:07:40.442467 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:40 crc kubenswrapper[4753]: I0129 12:07:40.442485 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:40Z","lastTransitionTime":"2026-01-29T12:07:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:40 crc kubenswrapper[4753]: I0129 12:07:40.545556 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:40 crc kubenswrapper[4753]: I0129 12:07:40.545897 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:40 crc kubenswrapper[4753]: I0129 12:07:40.546005 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:40 crc kubenswrapper[4753]: I0129 12:07:40.546133 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:40 crc kubenswrapper[4753]: I0129 12:07:40.546255 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:40Z","lastTransitionTime":"2026-01-29T12:07:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:40 crc kubenswrapper[4753]: I0129 12:07:40.598877 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-13 13:45:42.85195506 +0000 UTC Jan 29 12:07:40 crc kubenswrapper[4753]: I0129 12:07:40.650412 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:40 crc kubenswrapper[4753]: I0129 12:07:40.650484 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:40 crc kubenswrapper[4753]: I0129 12:07:40.650524 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:40 crc kubenswrapper[4753]: I0129 12:07:40.650549 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:40 crc kubenswrapper[4753]: I0129 12:07:40.650563 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:40Z","lastTransitionTime":"2026-01-29T12:07:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:40 crc kubenswrapper[4753]: I0129 12:07:40.752965 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:40 crc kubenswrapper[4753]: I0129 12:07:40.753011 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:40 crc kubenswrapper[4753]: I0129 12:07:40.753024 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:40 crc kubenswrapper[4753]: I0129 12:07:40.753043 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:40 crc kubenswrapper[4753]: I0129 12:07:40.753055 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:40Z","lastTransitionTime":"2026-01-29T12:07:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:40 crc kubenswrapper[4753]: I0129 12:07:40.856483 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:40 crc kubenswrapper[4753]: I0129 12:07:40.856563 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:40 crc kubenswrapper[4753]: I0129 12:07:40.856605 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:40 crc kubenswrapper[4753]: I0129 12:07:40.856626 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:40 crc kubenswrapper[4753]: I0129 12:07:40.856638 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:40Z","lastTransitionTime":"2026-01-29T12:07:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:40 crc kubenswrapper[4753]: I0129 12:07:40.888313 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 12:07:40 crc kubenswrapper[4753]: I0129 12:07:40.888372 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 12:07:40 crc kubenswrapper[4753]: I0129 12:07:40.888338 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p6m5g" Jan 29 12:07:40 crc kubenswrapper[4753]: I0129 12:07:40.888384 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 12:07:40 crc kubenswrapper[4753]: E0129 12:07:40.888589 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 12:07:40 crc kubenswrapper[4753]: E0129 12:07:40.888755 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 12:07:40 crc kubenswrapper[4753]: E0129 12:07:40.889509 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p6m5g" podUID="2412fa08-e643-4225-b494-eb999ea93fce" Jan 29 12:07:40 crc kubenswrapper[4753]: E0129 12:07:40.889606 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 12:07:40 crc kubenswrapper[4753]: I0129 12:07:40.890728 4753 scope.go:117] "RemoveContainer" containerID="355e2c7c84bbb5dcfd75734a95868268a117bb2fad6db6fc7c23ab6afbf7fb58" Jan 29 12:07:40 crc kubenswrapper[4753]: I0129 12:07:40.959634 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:40 crc kubenswrapper[4753]: I0129 12:07:40.959960 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:40 crc kubenswrapper[4753]: I0129 12:07:40.959976 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:40 crc kubenswrapper[4753]: I0129 12:07:40.959998 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:40 crc kubenswrapper[4753]: I0129 12:07:40.960009 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:40Z","lastTransitionTime":"2026-01-29T12:07:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.062510 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.062548 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.062559 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.062575 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.062585 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:41Z","lastTransitionTime":"2026-01-29T12:07:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.165592 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.165637 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.165650 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.165668 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.165679 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:41Z","lastTransitionTime":"2026-01-29T12:07:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.267952 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.268016 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.268031 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.268056 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.268072 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:41Z","lastTransitionTime":"2026-01-29T12:07:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.370583 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.370623 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.370632 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.370648 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.370658 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:41Z","lastTransitionTime":"2026-01-29T12:07:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.646610 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-04 08:53:09.000830048 +0000 UTC Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.650155 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.650197 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.650206 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.650237 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.650250 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:41Z","lastTransitionTime":"2026-01-29T12:07:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.655464 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-nzkvz_80bec2ab-0a88-4818-9339-760edda3b07e/ovnkube-controller/1.log" Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.658688 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" event={"ID":"80bec2ab-0a88-4818-9339-760edda3b07e","Type":"ContainerStarted","Data":"e7138397dae5e90b30b0cdec02d29c108d423b3c94b07ea63e1560c4f02ed607"} Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.659288 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.686427 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5m2jf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0203345d-1e9f-4cfe-bde7-90f87221d1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fdf3d79084d3e17b1f8cb0b285fd425280f34724c6a441104a2ec52b5217bfdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zzwn6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5m2jf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:41Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.704505 4753 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/network-metrics-daemon-p6m5g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2412fa08-e643-4225-b494-eb999ea93fce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjjxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjjxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:23Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-p6m5g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:41Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.723143 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c93f64b-e5af-4d6b-83e9-7fdfeb8548ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a9ec3e416b3b385c71b2fa28c0215fcb04b708e33fec2d85fdf0a75b848b027\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4fd3a920ea0c98edb2c8ad37d2a1a5d1ed50de5bc8c3e2f4d34c876b2cab54f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8dea62586ba5ac5a8ddc9e75b216a27593c8fe99516abc38c9c411a93842a372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://44c03bba5a02d879c3b235cd65897fb30651871689c8f02b9526966817f79636\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44c03bba5a02d879c3b235cd65897fb30651871689c8f02b9526966817f79636\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:41Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.743431 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7962d88e183df5c4d91f5767f2007068ea2fdfd5c31d9d6ed609c2ab9ca26999\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:41Z is after 2025-08-24T17:21:41Z" Jan 29 
12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.752941 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.752992 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.753005 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.753025 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.753042 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:41Z","lastTransitionTime":"2026-01-29T12:07:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.762653 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:41Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.786345 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x8kzr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d753320a-61c2-4c0e-bd48-96d74b352114\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c7a667b7243fc36ff18cb4c017152fff3d5f4eb5e28e306ace44420cd722c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tgns4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x8kzr\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:41Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.807309 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vwcjk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e921d361518102483679b5ff7387e27858e315d637efb0f18e06215fe00f70a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8b1c7640bd4881ba79f3471e309b2d1538aea8d0c8e01d3d7cfc3677d95725a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8b1c7640bd4881ba79f3471e309b2d1538aea8d0c8e01d3d7cfc3677d95725a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0ba51b6e2180da460faa3599850175db9d7410880ba0214425fa275803f36d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0ba51b6e2180da460faa3599850175db9d7410880ba0214425fa275803f36d3\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2026-01-29T12:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ed771b481178d6e12c669e122932ef25e1fe09db24c8f025f50a469400af184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ed771b481178d6e12c669e122932ef25e1fe09db24c8f025f50a469400af184\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1990423fefacfb9b390af88b9af343c1bb4c02014db6bf8226918a7be9f4ad5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1990423fefacfb9b390af88b9af343c1bb4c02014db6bf8226918a7be9f4ad5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vwcjk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-01-29T12:07:41Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.827352 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"703a6b9d-a15d-4ed3-b00b-db7bd5d42c61\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cab3ab80a8fddbc591cd07fdbed800f525bb8a1b5505bf818ebb59cbcce73003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7af8be0836a8ff7d08ff48d9487acdedd4b83127538d071d13b3215400c0f8de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d57a9cede640bdcec325175598b72dd00398436b730248728c7d9afd545b1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name
\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e86597c91848f265cd70410d57ac60eadba1d0322b9b897097366a41c93d8134\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T12:06:54Z\\\",\\\"message\\\":\\\"ed a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\"\\\\nI0129 12:06:54.190978 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 12:06:54.198044 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 12:06:54.198078 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 12:06:54.198139 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 12:06:54.198147 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 12:06:54.208089 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 12:06:54.208122 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208128 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208132 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 12:06:54.208135 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 12:06:54.208139 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 12:06:54.208143 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 12:06:54.208164 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0129 12:06:54.212380 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769688397\\\\\\\\\\\\\\\" (2026-01-29 12:06:37 +0000 UTC to 2026-02-28 12:06:38 +0000 UTC (now=2026-01-29 12:06:54.212317535 +0000 UTC))\\\\\\\"\\\\nF0129 12:06:54.212498 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e02d977e34888952623f4ced5e073d7bf1ff360f8deb435dc577d2ecf146cb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:33Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:41Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.845113 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:41Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.857425 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.857454 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.857464 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.857482 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.857493 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:41Z","lastTransitionTime":"2026-01-29T12:07:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.858452 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:41Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.874252 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966eb8e59f82bacf20ebfa1b1d5bf9dc0a9cdb3b430ae83ed9bde106eb3fa521\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea7c92ec02a9cf6ec5fbd8efe22a066dfef9b48f042137964c5dd49578c99079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:41Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.888181 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0310995-a7c7-47c3-ae6c-05daaaba92a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e56ac830346c0f85227bcebc9b76c2af8646767d4678934985d7b1f5846dcb82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8391a68625fc3d8fdbc4f16b0b7b25e359798d08ce65ad8f482722910873418a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7c24x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:41Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.904499 4753 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-rnbz9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b372210b-6e1b-4a80-b379-7c1d570712f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://346c36936603f4535d5ad3e6b60e451b05b4d376e2531c813f0c900c92caf5fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqzp6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-rnbz9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:41Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.920965 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9dtck" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"132de771-c8b4-403f-8916-8b453e7c6fc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30aac9e31bfe76eba96a50a30b03419b0b23ddd888485d609f37b84af28b8d3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gvjpd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://829af7b1f520503a0f36c60dd12b25d45c3831ba0cbb61f4f0a80582cb46da1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gvjpd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}]
,\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-9dtck\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:41Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.939649 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b108c546-a8c1-4bf9-814b-bfb5871b3013\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9842d9d6fed8c87bc20835964d3271b0be3117fedc658e6ef028db422d1ad478\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4fd4169036d3a646c9e5e4bfa8d86a92c557c3d8f77608ae5308dd0b8c45517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b4a6bbb431e321e54013c96e3cca1537283cbdf65d89a978780550d603663fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17
ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://31bd7db7bddd3e16cb455f7af9fd3a9bca49919c8f1ffb676dec83c543ba7763\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:41Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.955954 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d7ccf10988fd83d31d42be6f82403067a0cc50313c4d5dcddfb528507efffa4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:41Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.984717 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 12:07:41 crc kubenswrapper[4753]: E0129 12:07:41.984925 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.986969 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.987029 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.987042 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.987061 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:41 crc kubenswrapper[4753]: I0129 12:07:41.987121 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:41Z","lastTransitionTime":"2026-01-29T12:07:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:42 crc kubenswrapper[4753]: I0129 12:07:42.013011 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80bec2ab-0a88-4818-9339-760edda3b07e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f15016e29f1fc14f51b4d01e772071333fd8a885cacc1804b3a03dbcfc93836\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e6a7d8df812546f17b3e026005c5062086e97e3fa364b3fa22cefec025e4eba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f71859413676dab4a435d43c11ad7cba315a12de744c3c8b161273f6ad36580c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0ec2a21ec299b5eed363668f39a270a8b03ea79b9de3dd23bda0c30579c3de3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91c1df8b6763320498420463a3841f0bfa4fcf753c355f549c3c1e93bf5206a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://05d1e99596530f467ecdae51aec8db191ac045cd85e121fc9835e2f3688f6ae7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7138397dae5e90b30b0cdec02d29c108d423b3c
94b07ea63e1560c4f02ed607\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://355e2c7c84bbb5dcfd75734a95868268a117bb2fad6db6fc7c23ab6afbf7fb58\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T12:07:26Z\\\",\\\"message\\\":\\\"ift-kube-scheduler/scheduler_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.169:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {39432221-5995-412b-967b-35e1a9405ec7}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0129 12:07:26.279603 6197 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:26Z is after 2025-08-24T17:21:41Z]\\\\nI0129 12:07:26.279616 6197 obj_retry.go:303] Retry object setup: *v1.Pod 
openshift-i\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:25Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e40e5b13dfdc135ea09253d466bacdfc5c2ac3d28eabc76d833d71ac300905c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":
[{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-nzkvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:42Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:42 crc kubenswrapper[4753]: I0129 12:07:42.102670 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:42 crc kubenswrapper[4753]: I0129 12:07:42.102755 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:42 crc kubenswrapper[4753]: I0129 12:07:42.102770 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:42 crc kubenswrapper[4753]: I0129 12:07:42.102792 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:42 crc kubenswrapper[4753]: I0129 12:07:42.102805 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:42Z","lastTransitionTime":"2026-01-29T12:07:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:42 crc kubenswrapper[4753]: I0129 12:07:42.206252 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:42 crc kubenswrapper[4753]: I0129 12:07:42.206289 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:42 crc kubenswrapper[4753]: I0129 12:07:42.206298 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:42 crc kubenswrapper[4753]: I0129 12:07:42.206315 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:42 crc kubenswrapper[4753]: I0129 12:07:42.206325 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:42Z","lastTransitionTime":"2026-01-29T12:07:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:42 crc kubenswrapper[4753]: I0129 12:07:42.309717 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:42 crc kubenswrapper[4753]: I0129 12:07:42.309774 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:42 crc kubenswrapper[4753]: I0129 12:07:42.309785 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:42 crc kubenswrapper[4753]: I0129 12:07:42.309803 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:42 crc kubenswrapper[4753]: I0129 12:07:42.309814 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:42Z","lastTransitionTime":"2026-01-29T12:07:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:42 crc kubenswrapper[4753]: I0129 12:07:42.413292 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:42 crc kubenswrapper[4753]: I0129 12:07:42.413373 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:42 crc kubenswrapper[4753]: I0129 12:07:42.413387 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:42 crc kubenswrapper[4753]: I0129 12:07:42.413415 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:42 crc kubenswrapper[4753]: I0129 12:07:42.413430 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:42Z","lastTransitionTime":"2026-01-29T12:07:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:42 crc kubenswrapper[4753]: I0129 12:07:42.516446 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:42 crc kubenswrapper[4753]: I0129 12:07:42.516497 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:42 crc kubenswrapper[4753]: I0129 12:07:42.516510 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:42 crc kubenswrapper[4753]: I0129 12:07:42.516529 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:42 crc kubenswrapper[4753]: I0129 12:07:42.516542 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:42Z","lastTransitionTime":"2026-01-29T12:07:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:42 crc kubenswrapper[4753]: I0129 12:07:42.619008 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:42 crc kubenswrapper[4753]: I0129 12:07:42.619046 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:42 crc kubenswrapper[4753]: I0129 12:07:42.619054 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:42 crc kubenswrapper[4753]: I0129 12:07:42.619070 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:42 crc kubenswrapper[4753]: I0129 12:07:42.619079 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:42Z","lastTransitionTime":"2026-01-29T12:07:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:42 crc kubenswrapper[4753]: I0129 12:07:42.647245 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-11 06:35:50.732982659 +0000 UTC Jan 29 12:07:42 crc kubenswrapper[4753]: I0129 12:07:42.722113 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:42 crc kubenswrapper[4753]: I0129 12:07:42.722167 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:42 crc kubenswrapper[4753]: I0129 12:07:42.722186 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:42 crc kubenswrapper[4753]: I0129 12:07:42.722212 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:42 crc kubenswrapper[4753]: I0129 12:07:42.722241 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:42Z","lastTransitionTime":"2026-01-29T12:07:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:42 crc kubenswrapper[4753]: I0129 12:07:42.824472 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:42 crc kubenswrapper[4753]: I0129 12:07:42.824510 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:42 crc kubenswrapper[4753]: I0129 12:07:42.824518 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:42 crc kubenswrapper[4753]: I0129 12:07:42.824535 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:42 crc kubenswrapper[4753]: I0129 12:07:42.824545 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:42Z","lastTransitionTime":"2026-01-29T12:07:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:42 crc kubenswrapper[4753]: I0129 12:07:42.991707 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 12:07:42 crc kubenswrapper[4753]: I0129 12:07:42.991793 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p6m5g" Jan 29 12:07:42 crc kubenswrapper[4753]: I0129 12:07:42.991855 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 12:07:42 crc kubenswrapper[4753]: E0129 12:07:42.991902 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 12:07:42 crc kubenswrapper[4753]: E0129 12:07:42.992039 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p6m5g" podUID="2412fa08-e643-4225-b494-eb999ea93fce" Jan 29 12:07:42 crc kubenswrapper[4753]: E0129 12:07:42.992154 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 12:07:42 crc kubenswrapper[4753]: I0129 12:07:42.993382 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:42 crc kubenswrapper[4753]: I0129 12:07:42.993418 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:42 crc kubenswrapper[4753]: I0129 12:07:42.993430 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:42 crc kubenswrapper[4753]: I0129 12:07:42.993447 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:42 crc kubenswrapper[4753]: I0129 12:07:42.993456 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:42Z","lastTransitionTime":"2026-01-29T12:07:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.095890 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.095948 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.095965 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.095987 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.096000 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:43Z","lastTransitionTime":"2026-01-29T12:07:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.199129 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.199160 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.199168 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.199184 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.199192 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:43Z","lastTransitionTime":"2026-01-29T12:07:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.302041 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.302092 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.302102 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.302122 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.302134 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:43Z","lastTransitionTime":"2026-01-29T12:07:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.405499 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.405540 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.405551 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.405569 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.405583 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:43Z","lastTransitionTime":"2026-01-29T12:07:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.508158 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.508207 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.508216 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.508247 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.508257 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:43Z","lastTransitionTime":"2026-01-29T12:07:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.611317 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.611377 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.611389 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.611411 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.611423 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:43Z","lastTransitionTime":"2026-01-29T12:07:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.648064 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-07 10:34:11.34263937 +0000 UTC Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.667055 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-nzkvz_80bec2ab-0a88-4818-9339-760edda3b07e/ovnkube-controller/2.log" Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.668014 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-nzkvz_80bec2ab-0a88-4818-9339-760edda3b07e/ovnkube-controller/1.log" Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.670953 4753 generic.go:334] "Generic (PLEG): container finished" podID="80bec2ab-0a88-4818-9339-760edda3b07e" containerID="e7138397dae5e90b30b0cdec02d29c108d423b3c94b07ea63e1560c4f02ed607" exitCode=1 Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.671065 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" event={"ID":"80bec2ab-0a88-4818-9339-760edda3b07e","Type":"ContainerDied","Data":"e7138397dae5e90b30b0cdec02d29c108d423b3c94b07ea63e1560c4f02ed607"} Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.671256 4753 scope.go:117] "RemoveContainer" containerID="355e2c7c84bbb5dcfd75734a95868268a117bb2fad6db6fc7c23ab6afbf7fb58" Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.672075 4753 scope.go:117] "RemoveContainer" containerID="e7138397dae5e90b30b0cdec02d29c108d423b3c94b07ea63e1560c4f02ed607" Jan 29 12:07:43 crc kubenswrapper[4753]: E0129 12:07:43.672317 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-nzkvz_openshift-ovn-kubernetes(80bec2ab-0a88-4818-9339-760edda3b07e)\"" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" podUID="80bec2ab-0a88-4818-9339-760edda3b07e" Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.688550 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d7ccf10988fd83d31d42be6f82403067a0cc50313c4d5dcddfb528507efffa4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:43Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.714108 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.714150 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.714163 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.714180 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.714299 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:43Z","lastTransitionTime":"2026-01-29T12:07:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.718146 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80bec2ab-0a88-4818-9339-760edda3b07e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f15016e29f1fc14f51b4d01e772071333fd8a885cacc1804b3a03dbcfc93836\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e6a7d8df812546f17b3e026005c5062086e97e3fa364b3fa22cefec025e4eba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://f71859413676dab4a435d43c11ad7cba315a12de744c3c8b161273f6ad36580c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0ec2a21ec299b5eed363668f39a270a8b03ea79b9de3dd23bda0c30579c3de3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91c1df8b6763320498420463a3841f0bfa4fcf753c355f549c3c1e93bf5206a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://05d1e99596530f467ecdae51aec8db191ac045cd85e121fc9835e2f3688f6ae7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7138397dae5e90b30b0cdec02d29c108d423b3c94b07ea63e1560c4f02ed607\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://355e2c7c84bbb5dcfd75734a95868268a117bb2fad6db6fc7c23ab6afbf7fb58\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T12:07:26Z\\\",\\\"message\\\":\\\"ift-kube-scheduler/scheduler_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.169:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {39432221-5995-412b-967b-35e1a9405ec7}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0129 12:07:26.279603 6197 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:26Z is after 2025-08-24T17:21:41Z]\\\\nI0129 12:07:26.279616 6197 obj_retry.go:303] Retry object setup: *v1.Pod 
openshift-i\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:25Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e7138397dae5e90b30b0cdec02d29c108d423b3c94b07ea63e1560c4f02ed607\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T12:07:43Z\\\",\\\"message\\\":\\\"rnalversions/factory.go:140\\\\nI0129 12:07:43.077687 6477 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 12:07:43.077875 6477 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0129 12:07:43.078075 6477 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 12:07:43.078370 6477 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 12:07:43.078709 6477 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0129 12:07:43.078725 6477 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0129 12:07:43.078784 6477 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0129 12:07:43.078798 6477 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0129 12:07:43.078806 6477 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0129 12:07:43.078847 6477 factory.go:656] Stopping watch factory\\\\nI0129 12:07:43.078862 6477 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0129 12:07:43.078866 6477 ovnkube.go:599] Stopped ovnkube\\\\nI0129 12:07:43.078876 6477 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0129 
12\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e40e5b13dfdc135ea09253d466bacdfc5c2ac3d28eabc76d833d71ac300905c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-nzkvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:43Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.728099 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.728239 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.728340 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.728444 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.728543 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:43Z","lastTransitionTime":"2026-01-29T12:07:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.735732 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c93f64b-e5af-4d6b-83e9-7fdfeb8548ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a9ec3e416b3b385c71b2fa28c0215fcb04b708e33fec2d85fdf0a75b848b027\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4fd3a920ea0c98edb2c8ad37d2a1a5d1ed50de5bc8c3e2f4d34c876b2cab54f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8dea62586ba5ac5a8ddc9e75b216a27593c8fe99516abc38c9c411a93842a372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://44c03bba5a02d879c3b235cd65897fb30651871689c8f02b9526966817f79636\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44c03bba5a02d879c3b235cd65897fb30651871689c8f02b9526966817f79636\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:43Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:43 crc kubenswrapper[4753]: E0129 12:07:43.748036 4753 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:43Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:43Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:43Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:43Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:43Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:43Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7536a030-b66d-4df9-a1ec-9890f7ad99e7\\\",\\\"systemUUID\\\":\\\"ce89c1ce-259c-4b78-b348-3ef96afb6944\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:43Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.752702 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
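
Every status patch in this stretch of the log fails for the same root cause, recorded at the end of the preceding record: the serving certificate of the network-node-identity webhook expired at 2025-08-24T17:21:41Z, while the node clock reads 2026-01-29T12:07:43Z. Below is a minimal sketch of the validity-window comparison behind the "x509: certificate has expired or is not yet valid" message, using the two timestamps taken from the log; the real check happens inside Go's crypto/x509 during TLS verification, so this is an illustration, not the kubelet's code.

    // Minimal sketch of the validity-window check behind the
    // "x509: certificate has expired or is not yet valid" error.
    // Both timestamps are copied from the log records above.
    package main

    import (
            "fmt"
            "time"
    )

    func main() {
            now, _ := time.Parse(time.RFC3339, "2026-01-29T12:07:43Z")      // node clock
            notAfter, _ := time.Parse(time.RFC3339, "2025-08-24T17:21:41Z") // cert NotAfter

            if now.After(notAfter) {
                    fmt.Printf("certificate expired %s ago; every webhook POST will fail\n",
                            now.Sub(notAfter).Round(time.Hour))
            }
    }

The gap is roughly five months, so no amount of retrying by the status manager can succeed until the certificate is rotated or the node clock is corrected.
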
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7962d88e183df5c4d91f5767f2007068ea2fdfd5c31d9d6ed609c2ab9ca26999\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:43Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.754011 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.754050 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.754062 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.754080 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.754089 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:43Z","lastTransitionTime":"2026-01-29T12:07:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.765639 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5m2jf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0203345d-1e9f-4cfe-bde7-90f87221d1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fdf3d79084d3e17b1f8cb0b285fd425280f34724c6a441104a2ec52b5217bfdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zzwn6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5m2jf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:43Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:43 crc kubenswrapper[4753]: E0129 12:07:43.768263 4753 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:43Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:43Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:43Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:43Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:43Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:43Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\
"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":45063
7738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7536a030-b66d-4df9-a1ec-9890f7ad99e7\\\",\\\"systemUUID\\\":\\\"ce89c1ce-259c-4b78-b348-3ef96afb6944\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:43Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.777674 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.777754 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.777781 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.777825 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.777852 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:43Z","lastTransitionTime":"2026-01-29T12:07:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
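
Alongside the webhook failures, the node's Ready condition goes False because the container runtime reports NetworkReady=false: there is no CNI configuration in /etc/kubernetes/cni/net.d/ yet, and the network pods that would write one are themselves blocked. A hedged sketch of the kind of directory scan a runtime performs when it makes this complaint; the path comes from the log, while the accepted extensions (.conf, .conflist, .json) are the conventional CNI ones and are an assumption of this sketch.

    // Hedged sketch: scan the CNI conf dir the way a container
    // runtime does before declaring the network ready. Path from
    // the log; extensions are the conventional CNI ones (assumed).
    package main

    import (
            "fmt"
            "os"
            "path/filepath"
    )

    func main() {
            confDir := "/etc/kubernetes/cni/net.d" // from the NetworkReady message
            entries, err := os.ReadDir(confDir)
            if err != nil {
                    fmt.Println("cannot read conf dir:", err)
                    return
            }
            var configs []string
            for _, e := range entries {
                    switch filepath.Ext(e.Name()) {
                    case ".conf", ".conflist", ".json":
                            configs = append(configs, e.Name())
                    }
            }
            if len(configs) == 0 {
                    // This is the state the kubelet is reporting above.
                    fmt.Println("no CNI configuration file found; network not ready")
                    return
            }
            fmt.Println("CNI configs:", configs)
    }
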
Has your network provider started?"} Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.860407 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-p6m5g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2412fa08-e643-4225-b494-eb999ea93fce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjjxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjjxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:23Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-p6m5g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:43Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.876665 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"703a6b9d-a15d-4ed3-b00b-db7bd5d42c61\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cab3ab80a8fddbc591cd07fdbed800f525bb8a1b5505bf818ebb59cbcce73003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7af8be0836a8ff7d08ff48d9487acdedd4b83127538d071d13b3215400c0f8de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d57a9cede640bdcec325175598b72dd00398436b730248728c7d9afd545b1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e86597c91848f265cd70410d57ac60eadba1d0322b9b897097366a41c93d8134\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T12:06:54Z\\\",\\\"message\\\":\\\"ed a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\"\\\\nI0129 12:06:54.190978 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 12:06:54.198044 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 12:06:54.198078 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 12:06:54.198139 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 12:06:54.198147 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 12:06:54.208089 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 12:06:54.208122 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208128 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208132 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 12:06:54.208135 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 12:06:54.208139 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 12:06:54.208143 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 12:06:54.208164 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0129 12:06:54.212380 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769688397\\\\\\\\\\\\\\\" (2026-01-29 12:06:37 +0000 UTC to 2026-02-28 12:06:38 +0000 UTC (now=2026-01-29 12:06:54.212317535 +0000 UTC))\\\\\\\"\\\\nF0129 12:06:54.212498 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e02d977e34888952623f4ced5e073d7bf1ff360f8deb435dc577d2ecf146cb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:33Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:43Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.888595 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 12:07:43 crc kubenswrapper[4753]: E0129 12:07:43.888907 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
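
The patch bodies quoted in these records use Kubernetes strategic-merge-patch syntax: the "$setElementOrder/conditions" directive pins the ordering of the conditions list by its merge key ("type"), while the "conditions" array carries only the entries that changed. A hedged sketch of that patch shape, built by hand for illustration; the field names mirror the log, but the kubelet produces these with its own strategic-merge helpers rather than code like this.

    // Hedged sketch of the strategic-merge-patch shape seen in the
    // failed status updates: "$setElementOrder/conditions" fixes
    // list order by merge key while "conditions" carries deltas.
    package main

    import (
            "encoding/json"
            "fmt"
    )

    func main() {
            patch := map[string]any{
                    "status": map[string]any{
                            "$setElementOrder/conditions": []map[string]string{
                                    {"type": "PodReadyToStartContainers"},
                                    {"type": "Initialized"},
                                    {"type": "Ready"},
                                    {"type": "ContainersReady"},
                                    {"type": "PodScheduled"},
                            },
                            "conditions": []map[string]string{
                                    {"type": "Ready", "status": "True",
                                            "lastTransitionTime": "2026-01-29T12:07:00Z"},
                            },
                    },
            }
            out, _ := json.MarshalIndent(patch, "", "  ")
            fmt.Println(string(out))
    }
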
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.891418 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:43Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.903299 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x8kzr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d753320a-61c2-4c0e-bd48-96d74b352114\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c7a667b7243fc36ff18cb4c017152fff3d5f4eb5e28e306ace44420cd722c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tgns4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x8kzr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:43Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.920323 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vwcjk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e921d361518102483679b5ff7387e27858e315d637efb0f18e06215fe00f70a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8b1c7640bd4881ba79f3471e309b2d1538aea8d0c8e01d3d7cfc3677d95725a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8b1c7640bd4881ba79f3471e309b2d1538aea8d0c8e01d3d7cfc3677d95725a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0ba51b6e2180da460faa3599850175db9d7410880ba0214425fa275803f36d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0ba51b6e2180da460faa3599850175db9d7410880ba0214425fa275803f36d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ed771b481178d6e12c669e122932ef25e1fe09db24c8f025f50a469400af184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ed771b481178d6e12c669e122932ef25e1fe09db24c8f025f50a469400af184\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1990423fefacfb9b390af88b9af343c1bb4c02014db6bf8226918a7be9f4ad5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1990423fefacfb9b390af88b9af343c1bb4c02014db6bf8226918a7be9f4ad5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vwcjk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:43Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.932304 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0310995-a7c7-47c3-ae6c-05daaaba92a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e56ac830346c0f85227bcebc9b76c2af8646767d4678934985d7b1f5846dcb82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8391a68625fc3d8fdbc4f16b0b7b25e359798d08ce65ad8f482722910873418a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7c24x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:43Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.946410 4753 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-rnbz9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b372210b-6e1b-4a80-b379-7c1d570712f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://346c36936603f4535d5ad3e6b60e451b05b4d376e2531c813f0c900c92caf5fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqzp6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-rnbz9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:43Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.959415 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9dtck" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"132de771-c8b4-403f-8916-8b453e7c6fc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30aac9e31bfe76eba96a50a30b03419b0b23ddd888485d609f37b84af28b8d3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gvjpd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://829af7b1f520503a0f36c60dd12b25d45c3831ba0cbb61f4f0a80582cb46da1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gvjpd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}]
,\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-9dtck\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:43Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:43 crc kubenswrapper[4753]: I0129 12:07:43.975794 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b108c546-a8c1-4bf9-814b-bfb5871b3013\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9842d9d6fed8c87bc20835964d3271b0be3117fedc658e6ef028db422d1ad478\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4fd4169036d3a646c9e5e4bfa8d86a92c557c3d8f77608ae5308dd0b8c45517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b4a6bbb431e321e54013c96e3cca1537283cbdf65d89a978780550d603663fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17
ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://31bd7db7bddd3e16cb455f7af9fd3a9bca49919c8f1ffb676dec83c543ba7763\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:43Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.102048 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could 
not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:44Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:44 crc kubenswrapper[4753]: E0129 12:07:44.103522 4753 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:43Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:43Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:43Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:43Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:43Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:43Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7536a030-b66d-4df9-a1ec-9890f7ad99e7\\\",\\\"systemUUID\\\":\\\"ce89c1ce-259c-4b78-b348-3ef96afb6944\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:44Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.107624 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.107682 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.107696 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.107717 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.107728 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:44Z","lastTransitionTime":"2026-01-29T12:07:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.117592 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:44Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:44 crc kubenswrapper[4753]: E0129 12:07:44.125448 4753 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:44Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:44Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:44Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:44Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:44Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:44Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7536a030-b66d-4df9-a1ec-9890f7ad99e7\\\",\\\"systemUUID\\\":\\\"ce89c1ce-259c-4b78-b348-3ef96afb6944\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:44Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.130132 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.130200 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.130214 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.130264 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.130278 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:44Z","lastTransitionTime":"2026-01-29T12:07:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.135118 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966eb8e59f82bacf20ebfa1b1d5bf9dc0a9cdb3b430ae83ed9bde106eb3fa521\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea7c92ec02a9cf6ec5fbd8efe22a066dfef9b48f042137964c5dd49578c99079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"o
vnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:44Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:44 crc kubenswrapper[4753]: E0129 12:07:44.147853 4753 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:44Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:44Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:44Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:44Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:44Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:44Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7536a030-b66d-4df9-a1ec-9890f7ad99e7\\\",\\\"systemUUID\\\":\\\"ce89c1ce-259c-4b78-b348-3ef96afb6944\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:44Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:44 crc kubenswrapper[4753]: E0129 12:07:44.147988 4753 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.150010 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.150068 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.150081 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.150105 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.150119 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:44Z","lastTransitionTime":"2026-01-29T12:07:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.252959 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.253004 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.253013 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.253034 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.253052 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:44Z","lastTransitionTime":"2026-01-29T12:07:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.356107 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.356504 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.356517 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.356542 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.356555 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:44Z","lastTransitionTime":"2026-01-29T12:07:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.459822 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.459886 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.459906 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.459930 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.459947 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:44Z","lastTransitionTime":"2026-01-29T12:07:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.563430 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.563479 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.563491 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.563510 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.563522 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:44Z","lastTransitionTime":"2026-01-29T12:07:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.648278 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-16 00:02:59.272082072 +0000 UTC Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.666379 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.666430 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.666439 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.666456 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.666467 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:44Z","lastTransitionTime":"2026-01-29T12:07:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.675942 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-nzkvz_80bec2ab-0a88-4818-9339-760edda3b07e/ovnkube-controller/2.log" Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.769107 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.769152 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.769161 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.769180 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.769192 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:44Z","lastTransitionTime":"2026-01-29T12:07:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.871358 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.872279 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.872294 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.872315 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.872324 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:44Z","lastTransitionTime":"2026-01-29T12:07:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.887864 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.887918 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.887868 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p6m5g" Jan 29 12:07:44 crc kubenswrapper[4753]: E0129 12:07:44.888149 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 12:07:44 crc kubenswrapper[4753]: E0129 12:07:44.888333 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p6m5g" podUID="2412fa08-e643-4225-b494-eb999ea93fce" Jan 29 12:07:44 crc kubenswrapper[4753]: E0129 12:07:44.888416 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.975565 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.975657 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.975676 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.975701 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:44 crc kubenswrapper[4753]: I0129 12:07:44.975719 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:44Z","lastTransitionTime":"2026-01-29T12:07:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:45 crc kubenswrapper[4753]: I0129 12:07:45.078788 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:45 crc kubenswrapper[4753]: I0129 12:07:45.078839 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:45 crc kubenswrapper[4753]: I0129 12:07:45.078849 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:45 crc kubenswrapper[4753]: I0129 12:07:45.078870 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:45 crc kubenswrapper[4753]: I0129 12:07:45.078880 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:45Z","lastTransitionTime":"2026-01-29T12:07:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:45 crc kubenswrapper[4753]: I0129 12:07:45.181723 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:45 crc kubenswrapper[4753]: I0129 12:07:45.181784 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:45 crc kubenswrapper[4753]: I0129 12:07:45.181802 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:45 crc kubenswrapper[4753]: I0129 12:07:45.181823 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:45 crc kubenswrapper[4753]: I0129 12:07:45.181834 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:45Z","lastTransitionTime":"2026-01-29T12:07:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:45 crc kubenswrapper[4753]: I0129 12:07:45.285471 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:45 crc kubenswrapper[4753]: I0129 12:07:45.285552 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:45 crc kubenswrapper[4753]: I0129 12:07:45.285570 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:45 crc kubenswrapper[4753]: I0129 12:07:45.285602 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:45 crc kubenswrapper[4753]: I0129 12:07:45.285620 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:45Z","lastTransitionTime":"2026-01-29T12:07:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:45 crc kubenswrapper[4753]: I0129 12:07:45.389367 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:45 crc kubenswrapper[4753]: I0129 12:07:45.389476 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:45 crc kubenswrapper[4753]: I0129 12:07:45.389519 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:45 crc kubenswrapper[4753]: I0129 12:07:45.389554 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:45 crc kubenswrapper[4753]: I0129 12:07:45.389577 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:45Z","lastTransitionTime":"2026-01-29T12:07:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:45 crc kubenswrapper[4753]: I0129 12:07:45.492877 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:45 crc kubenswrapper[4753]: I0129 12:07:45.492935 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:45 crc kubenswrapper[4753]: I0129 12:07:45.492947 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:45 crc kubenswrapper[4753]: I0129 12:07:45.492965 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:45 crc kubenswrapper[4753]: I0129 12:07:45.492979 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:45Z","lastTransitionTime":"2026-01-29T12:07:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:45 crc kubenswrapper[4753]: I0129 12:07:45.595993 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:45 crc kubenswrapper[4753]: I0129 12:07:45.596056 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:45 crc kubenswrapper[4753]: I0129 12:07:45.596069 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:45 crc kubenswrapper[4753]: I0129 12:07:45.596089 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:45 crc kubenswrapper[4753]: I0129 12:07:45.596101 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:45Z","lastTransitionTime":"2026-01-29T12:07:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:45 crc kubenswrapper[4753]: I0129 12:07:45.649406 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-14 02:13:35.279963203 +0000 UTC Jan 29 12:07:45 crc kubenswrapper[4753]: I0129 12:07:45.698987 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:45 crc kubenswrapper[4753]: I0129 12:07:45.699030 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:45 crc kubenswrapper[4753]: I0129 12:07:45.699042 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:45 crc kubenswrapper[4753]: I0129 12:07:45.699061 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:45 crc kubenswrapper[4753]: I0129 12:07:45.699076 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:45Z","lastTransitionTime":"2026-01-29T12:07:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:45 crc kubenswrapper[4753]: I0129 12:07:45.801132 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:45 crc kubenswrapper[4753]: I0129 12:07:45.801170 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:45 crc kubenswrapper[4753]: I0129 12:07:45.801181 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:45 crc kubenswrapper[4753]: I0129 12:07:45.801201 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:45 crc kubenswrapper[4753]: I0129 12:07:45.801211 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:45Z","lastTransitionTime":"2026-01-29T12:07:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:45 crc kubenswrapper[4753]: I0129 12:07:45.888538 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 12:07:45 crc kubenswrapper[4753]: E0129 12:07:45.888767 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 12:07:45 crc kubenswrapper[4753]: I0129 12:07:45.903157 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:45 crc kubenswrapper[4753]: I0129 12:07:45.903191 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:45 crc kubenswrapper[4753]: I0129 12:07:45.903201 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:45 crc kubenswrapper[4753]: I0129 12:07:45.903214 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:45 crc kubenswrapper[4753]: I0129 12:07:45.903241 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:45Z","lastTransitionTime":"2026-01-29T12:07:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:46 crc kubenswrapper[4753]: I0129 12:07:46.005766 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:46 crc kubenswrapper[4753]: I0129 12:07:46.005815 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:46 crc kubenswrapper[4753]: I0129 12:07:46.005828 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:46 crc kubenswrapper[4753]: I0129 12:07:46.005847 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:46 crc kubenswrapper[4753]: I0129 12:07:46.005857 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:46Z","lastTransitionTime":"2026-01-29T12:07:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:46 crc kubenswrapper[4753]: I0129 12:07:46.108263 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:46 crc kubenswrapper[4753]: I0129 12:07:46.108302 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:46 crc kubenswrapper[4753]: I0129 12:07:46.108314 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:46 crc kubenswrapper[4753]: I0129 12:07:46.108334 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:46 crc kubenswrapper[4753]: I0129 12:07:46.108388 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:46Z","lastTransitionTime":"2026-01-29T12:07:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:46 crc kubenswrapper[4753]: I0129 12:07:46.211407 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:46 crc kubenswrapper[4753]: I0129 12:07:46.211466 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:46 crc kubenswrapper[4753]: I0129 12:07:46.211484 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:46 crc kubenswrapper[4753]: I0129 12:07:46.211506 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:46 crc kubenswrapper[4753]: I0129 12:07:46.211516 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:46Z","lastTransitionTime":"2026-01-29T12:07:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:46 crc kubenswrapper[4753]: I0129 12:07:46.315343 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:46 crc kubenswrapper[4753]: I0129 12:07:46.315426 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:46 crc kubenswrapper[4753]: I0129 12:07:46.315446 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:46 crc kubenswrapper[4753]: I0129 12:07:46.315470 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:46 crc kubenswrapper[4753]: I0129 12:07:46.315483 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:46Z","lastTransitionTime":"2026-01-29T12:07:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:46 crc kubenswrapper[4753]: I0129 12:07:46.419665 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:46 crc kubenswrapper[4753]: I0129 12:07:46.419709 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:46 crc kubenswrapper[4753]: I0129 12:07:46.419733 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:46 crc kubenswrapper[4753]: I0129 12:07:46.419770 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:46 crc kubenswrapper[4753]: I0129 12:07:46.419784 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:46Z","lastTransitionTime":"2026-01-29T12:07:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:46 crc kubenswrapper[4753]: I0129 12:07:46.525956 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:46 crc kubenswrapper[4753]: I0129 12:07:46.526018 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:46 crc kubenswrapper[4753]: I0129 12:07:46.526029 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:46 crc kubenswrapper[4753]: I0129 12:07:46.526044 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:46 crc kubenswrapper[4753]: I0129 12:07:46.526062 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:46Z","lastTransitionTime":"2026-01-29T12:07:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:46 crc kubenswrapper[4753]: I0129 12:07:46.628516 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:46 crc kubenswrapper[4753]: I0129 12:07:46.628812 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:46 crc kubenswrapper[4753]: I0129 12:07:46.628895 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:46 crc kubenswrapper[4753]: I0129 12:07:46.628979 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:46 crc kubenswrapper[4753]: I0129 12:07:46.629058 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:46Z","lastTransitionTime":"2026-01-29T12:07:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:46 crc kubenswrapper[4753]: I0129 12:07:46.650077 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-17 12:06:56.200209425 +0000 UTC Jan 29 12:07:46 crc kubenswrapper[4753]: I0129 12:07:46.731749 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:46 crc kubenswrapper[4753]: I0129 12:07:46.731820 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:46 crc kubenswrapper[4753]: I0129 12:07:46.731840 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:46 crc kubenswrapper[4753]: I0129 12:07:46.731918 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:46 crc kubenswrapper[4753]: I0129 12:07:46.731938 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:46Z","lastTransitionTime":"2026-01-29T12:07:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:46 crc kubenswrapper[4753]: I0129 12:07:46.835338 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:46 crc kubenswrapper[4753]: I0129 12:07:46.835399 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:46 crc kubenswrapper[4753]: I0129 12:07:46.835414 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:46 crc kubenswrapper[4753]: I0129 12:07:46.835434 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:46 crc kubenswrapper[4753]: I0129 12:07:46.835447 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:46Z","lastTransitionTime":"2026-01-29T12:07:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:46 crc kubenswrapper[4753]: I0129 12:07:46.887562 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p6m5g" Jan 29 12:07:46 crc kubenswrapper[4753]: I0129 12:07:46.887625 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 12:07:46 crc kubenswrapper[4753]: I0129 12:07:46.887640 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 12:07:46 crc kubenswrapper[4753]: E0129 12:07:46.887791 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p6m5g" podUID="2412fa08-e643-4225-b494-eb999ea93fce" Jan 29 12:07:46 crc kubenswrapper[4753]: E0129 12:07:46.887958 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 12:07:46 crc kubenswrapper[4753]: E0129 12:07:46.888036 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 12:07:46 crc kubenswrapper[4753]: I0129 12:07:46.938712 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:46 crc kubenswrapper[4753]: I0129 12:07:46.938763 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:46 crc kubenswrapper[4753]: I0129 12:07:46.938776 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:46 crc kubenswrapper[4753]: I0129 12:07:46.938798 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:46 crc kubenswrapper[4753]: I0129 12:07:46.938811 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:46Z","lastTransitionTime":"2026-01-29T12:07:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:47 crc kubenswrapper[4753]: I0129 12:07:47.041747 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:47 crc kubenswrapper[4753]: I0129 12:07:47.041793 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:47 crc kubenswrapper[4753]: I0129 12:07:47.041804 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:47 crc kubenswrapper[4753]: I0129 12:07:47.041822 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:47 crc kubenswrapper[4753]: I0129 12:07:47.041833 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:47Z","lastTransitionTime":"2026-01-29T12:07:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:47 crc kubenswrapper[4753]: I0129 12:07:47.144251 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:47 crc kubenswrapper[4753]: I0129 12:07:47.144297 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:47 crc kubenswrapper[4753]: I0129 12:07:47.144306 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:47 crc kubenswrapper[4753]: I0129 12:07:47.144322 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:47 crc kubenswrapper[4753]: I0129 12:07:47.144331 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:47Z","lastTransitionTime":"2026-01-29T12:07:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:47 crc kubenswrapper[4753]: I0129 12:07:47.247407 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:47 crc kubenswrapper[4753]: I0129 12:07:47.247446 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:47 crc kubenswrapper[4753]: I0129 12:07:47.247456 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:47 crc kubenswrapper[4753]: I0129 12:07:47.247473 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:47 crc kubenswrapper[4753]: I0129 12:07:47.247482 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:47Z","lastTransitionTime":"2026-01-29T12:07:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:47 crc kubenswrapper[4753]: I0129 12:07:47.350364 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:47 crc kubenswrapper[4753]: I0129 12:07:47.350765 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:47 crc kubenswrapper[4753]: I0129 12:07:47.350906 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:47 crc kubenswrapper[4753]: I0129 12:07:47.351042 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:47 crc kubenswrapper[4753]: I0129 12:07:47.351150 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:47Z","lastTransitionTime":"2026-01-29T12:07:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:47 crc kubenswrapper[4753]: I0129 12:07:47.454121 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:47 crc kubenswrapper[4753]: I0129 12:07:47.454187 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:47 crc kubenswrapper[4753]: I0129 12:07:47.454205 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:47 crc kubenswrapper[4753]: I0129 12:07:47.454245 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:47 crc kubenswrapper[4753]: I0129 12:07:47.454255 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:47Z","lastTransitionTime":"2026-01-29T12:07:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:47 crc kubenswrapper[4753]: I0129 12:07:47.557833 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:47 crc kubenswrapper[4753]: I0129 12:07:47.557934 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:47 crc kubenswrapper[4753]: I0129 12:07:47.557971 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:47 crc kubenswrapper[4753]: I0129 12:07:47.557989 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:47 crc kubenswrapper[4753]: I0129 12:07:47.557999 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:47Z","lastTransitionTime":"2026-01-29T12:07:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:47 crc kubenswrapper[4753]: I0129 12:07:47.650694 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-08 05:31:36.589622894 +0000 UTC Jan 29 12:07:47 crc kubenswrapper[4753]: I0129 12:07:47.662039 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:47 crc kubenswrapper[4753]: I0129 12:07:47.662101 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:47 crc kubenswrapper[4753]: I0129 12:07:47.662118 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:47 crc kubenswrapper[4753]: I0129 12:07:47.662143 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:47 crc kubenswrapper[4753]: I0129 12:07:47.662159 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:47Z","lastTransitionTime":"2026-01-29T12:07:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:47 crc kubenswrapper[4753]: I0129 12:07:47.765698 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:47 crc kubenswrapper[4753]: I0129 12:07:47.765765 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:47 crc kubenswrapper[4753]: I0129 12:07:47.765777 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:47 crc kubenswrapper[4753]: I0129 12:07:47.765797 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:47 crc kubenswrapper[4753]: I0129 12:07:47.765811 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:47Z","lastTransitionTime":"2026-01-29T12:07:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:47 crc kubenswrapper[4753]: I0129 12:07:47.869560 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:47 crc kubenswrapper[4753]: I0129 12:07:47.869635 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:47 crc kubenswrapper[4753]: I0129 12:07:47.869657 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:47 crc kubenswrapper[4753]: I0129 12:07:47.869682 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:47 crc kubenswrapper[4753]: I0129 12:07:47.869700 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:47Z","lastTransitionTime":"2026-01-29T12:07:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:47 crc kubenswrapper[4753]: I0129 12:07:47.888021 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 12:07:47 crc kubenswrapper[4753]: E0129 12:07:47.888166 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 12:07:47 crc kubenswrapper[4753]: I0129 12:07:47.909846 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:47Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:47 crc kubenswrapper[4753]: I0129 12:07:47.927238 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:47Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:47 crc kubenswrapper[4753]: I0129 12:07:47.948063 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966eb8e59f82bacf20ebfa1b1d5bf9dc0a9cdb3b430ae83ed9bde106eb3fa521\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea7c92ec02a9cf6ec5fbd8efe22a066dfef9b48f042137964c5dd49578c99079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:47Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:47 crc kubenswrapper[4753]: I0129 12:07:47.964380 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0310995-a7c7-47c3-ae6c-05daaaba92a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e56ac830346c0f85227bcebc9b76c2af8646767d4678934985d7b1f5846dcb82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8391a68625fc3d8fdbc4f16b0b7b25e359798d08ce65ad8f482722910873418a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernete
s.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7c24x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:47Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:47 crc kubenswrapper[4753]: I0129 12:07:47.973791 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:47 crc kubenswrapper[4753]: I0129 12:07:47.973913 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:47 crc kubenswrapper[4753]: I0129 12:07:47.973928 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:47 crc kubenswrapper[4753]: I0129 12:07:47.973949 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:47 crc kubenswrapper[4753]: I0129 12:07:47.973962 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:47Z","lastTransitionTime":"2026-01-29T12:07:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:47 crc kubenswrapper[4753]: I0129 12:07:47.981041 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rnbz9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b372210b-6e1b-4a80-b379-7c1d570712f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://346c36936603f4535d5ad3e6b60e451b05b4d376e2531c813f0c900c92caf5fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqzp6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rnbz9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:47Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:47 crc kubenswrapper[4753]: I0129 12:07:47.998422 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9dtck" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"132de771-c8b4-403f-8916-8b453e7c6fc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30aac9e31bfe76eba96a50a30b03419b0b23ddd888485d609f37b84af28b8d3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gvjpd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://829af7b1f520503a0f36c60dd12b25d45c3831ba0cbb61f4f0a80582cb46da1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gvjpd\\\",\\\"readOnly\\\":true,\\\"recursiveReadO
nly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-9dtck\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:47Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:48 crc kubenswrapper[4753]: I0129 12:07:48.014704 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b108c546-a8c1-4bf9-814b-bfb5871b3013\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9842d9d6fed8c87bc20835964d3271b0be3117fedc658e6ef028db422d1ad478\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4fd4169036d3a646c9e5e4bfa8d86a92c557c3d8f77608ae5308dd0b8c45517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b4a6bbb431e321e54013c96e3cca1537283cbdf65d89a978780550d603663fd\\\",\\\"image\\\":\\\"quay.io/crcont/op
enshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://31bd7db7bddd3e16cb455f7af9fd3a9bca49919c8f1ffb676dec83c543ba7763\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:48Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:48 crc kubenswrapper[4753]: I0129 12:07:48.185051 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d7ccf10988fd83d31d42be6f82403067a0cc50313c4d5dcddfb528507efffa4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:48Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:48 crc kubenswrapper[4753]: I0129 12:07:48.186108 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:48 crc kubenswrapper[4753]: I0129 12:07:48.186169 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:48 crc kubenswrapper[4753]: I0129 12:07:48.186180 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:48 crc kubenswrapper[4753]: I0129 12:07:48.186204 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:48 crc kubenswrapper[4753]: I0129 12:07:48.186217 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:48Z","lastTransitionTime":"2026-01-29T12:07:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:48 crc kubenswrapper[4753]: I0129 12:07:48.206807 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80bec2ab-0a88-4818-9339-760edda3b07e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f15016e29f1fc14f51b4d01e772071333fd8a885cacc1804b3a03dbcfc93836\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e6a7d8df812546f17b3e026005c5062086e97e3fa364b3fa22cefec025e4eba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://f71859413676dab4a435d43c11ad7cba315a12de744c3c8b161273f6ad36580c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0ec2a21ec299b5eed363668f39a270a8b03ea79b9de3dd23bda0c30579c3de3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91c1df8b6763320498420463a3841f0bfa4fcf753c355f549c3c1e93bf5206a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://05d1e99596530f467ecdae51aec8db191ac045cd85e121fc9835e2f3688f6ae7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7138397dae5e90b30b0cdec02d29c108d423b3c94b07ea63e1560c4f02ed607\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://355e2c7c84bbb5dcfd75734a95868268a117bb2fad6db6fc7c23ab6afbf7fb58\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T12:07:26Z\\\",\\\"message\\\":\\\"ift-kube-scheduler/scheduler_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.169:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {39432221-5995-412b-967b-35e1a9405ec7}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0129 12:07:26.279603 6197 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:26Z is after 2025-08-24T17:21:41Z]\\\\nI0129 12:07:26.279616 6197 obj_retry.go:303] Retry object setup: *v1.Pod 
openshift-i\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:25Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e7138397dae5e90b30b0cdec02d29c108d423b3c94b07ea63e1560c4f02ed607\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T12:07:43Z\\\",\\\"message\\\":\\\"rnalversions/factory.go:140\\\\nI0129 12:07:43.077687 6477 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 12:07:43.077875 6477 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0129 12:07:43.078075 6477 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 12:07:43.078370 6477 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 12:07:43.078709 6477 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0129 12:07:43.078725 6477 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0129 12:07:43.078784 6477 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0129 12:07:43.078798 6477 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0129 12:07:43.078806 6477 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0129 12:07:43.078847 6477 factory.go:656] Stopping watch factory\\\\nI0129 12:07:43.078862 6477 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0129 12:07:43.078866 6477 ovnkube.go:599] Stopped ovnkube\\\\nI0129 12:07:43.078876 6477 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0129 
12\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e40e5b13dfdc135ea09253d466bacdfc5c2ac3d28eabc76d833d71ac300905c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-nzkvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:48Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:48 crc kubenswrapper[4753]: I0129 12:07:48.217893 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5m2jf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0203345d-1e9f-4cfe-bde7-90f87221d1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fdf3d79084d3e17b1f8cb0b285fd425280f34724c6a441104a2ec52b5217bfdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zzwn6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase
\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5m2jf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:48Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:48 crc kubenswrapper[4753]: I0129 12:07:48.231010 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-p6m5g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2412fa08-e643-4225-b494-eb999ea93fce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjjxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjjxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:23Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-p6m5g\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:48Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:48 crc kubenswrapper[4753]: I0129 12:07:48.247067 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c93f64b-e5af-4d6b-83e9-7fdfeb8548ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a9ec3e416b3b385c71b2fa28c0215fcb04b708e33fec2d85fdf0a75b848b027\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4fd3a920ea0c98edb2c8ad37d2a1a5d1ed50de5bc8c3e2f4d34c876b2cab54f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8dea62586ba5ac5a8ddc9e75b216a27593c8fe99516abc38c9c411a93842a372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\
":\\\"2026-01-29T12:06:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://44c03bba5a02d879c3b235cd65897fb30651871689c8f02b9526966817f79636\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44c03bba5a02d879c3b235cd65897fb30651871689c8f02b9526966817f79636\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:48Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:48 crc kubenswrapper[4753]: I0129 12:07:48.265395 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7962d88e183df5c4d91f5767f2007068ea2fdfd5c31d9d6ed609c2ab9ca26999\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:48Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:48 crc kubenswrapper[4753]: I0129 12:07:48.287519 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:48Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:48 crc kubenswrapper[4753]: I0129 12:07:48.290031 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:48 crc kubenswrapper[4753]: I0129 12:07:48.290086 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:48 crc kubenswrapper[4753]: I0129 12:07:48.290099 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:48 crc kubenswrapper[4753]: I0129 12:07:48.290121 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:48 crc kubenswrapper[4753]: I0129 12:07:48.290133 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:48Z","lastTransitionTime":"2026-01-29T12:07:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:48 crc kubenswrapper[4753]: I0129 12:07:48.300456 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x8kzr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d753320a-61c2-4c0e-bd48-96d74b352114\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c7a667b7243fc36ff18cb4c017152fff3d5f4eb5e28e306ace44420cd722c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tgns4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x8kzr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:48Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:48 crc kubenswrapper[4753]: I0129 12:07:48.320916 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vwcjk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e921d361518102483679b5ff7387e27858e315d637efb0f18e06215fe00f70a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8b1c7640bd4881ba79f3471e309b2d1538aea8d0c8e01d3d7cfc3677d95725a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8b1c7640bd4881ba79f3471e309b2d1538aea8d0c8e01d3d7cfc3677d95725a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0ba51b6e2180da460faa3599850175db9d7410880ba0214425fa275803f36d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0ba51b6e2180da460faa3599850175db9d7410880ba0214425fa275803f36d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ed771b481178d6e12c669e122932ef25e1fe09db24c8f025f50a469400af184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ed771b481178d6e12c669e122932ef25e1fe09db24c8f025f50a469400af184\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1990423fefacfb9b390af88b9af343c1bb4c02014db6bf8226918a7be9f4ad5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1990423fefacfb9b390af88b9af343c1bb4c02014db6bf8226918a7be9f4ad5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vwcjk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:48Z is after 2025-08-24T17:21:41Z"
Jan 29 12:07:48 crc kubenswrapper[4753]: I0129 12:07:48.339461 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"703a6b9d-a15d-4ed3-b00b-db7bd5d42c61\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cab3ab80a8fddbc591cd07fdbed800f525bb8a1b5505bf818ebb59cbcce73003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7af8be0836a8ff7d08ff48d9487acdedd4b83127538d071d13b3215400c0f8de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d57a9cede640bdcec325175598b72dd00398436b730248728c7d9afd545b1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e86597c91848f265cd70410d57ac60eadba1d0322b9b897097366a41c93d8134\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T12:06:54Z\\\",\\\"message\\\":\\\"ed a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\"\\\\nI0129 12:06:54.190978 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 12:06:54.198044 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 12:06:54.198078 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 12:06:54.198139 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 12:06:54.198147 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 12:06:54.208089 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 12:06:54.208122 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208128 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208132 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 12:06:54.208135 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 12:06:54.208139 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 12:06:54.208143 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 12:06:54.208164 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0129 12:06:54.212380 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769688397\\\\\\\\\\\\\\\" (2026-01-29 12:06:37 +0000 UTC to 2026-02-28 12:06:38 +0000 UTC (now=2026-01-29 12:06:54.212317535 +0000 UTC))\\\\\\\"\\\\nF0129 12:06:54.212498 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e02d977e34888952623f4ced5e073d7bf1ff360f8deb435dc577d2ecf146cb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:33Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:48Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:48 crc kubenswrapper[4753]: I0129 12:07:48.392783 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:48 crc kubenswrapper[4753]: I0129 12:07:48.392834 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:48 crc kubenswrapper[4753]: I0129 12:07:48.392852 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:48 crc kubenswrapper[4753]: I0129 12:07:48.392871 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:48 crc kubenswrapper[4753]: I0129 12:07:48.392883 4753 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:48Z","lastTransitionTime":"2026-01-29T12:07:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:48 crc kubenswrapper[4753]: I0129 12:07:48.495775 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:48 crc kubenswrapper[4753]: I0129 12:07:48.495827 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:48 crc kubenswrapper[4753]: I0129 12:07:48.495844 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:48 crc kubenswrapper[4753]: I0129 12:07:48.495868 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:48 crc kubenswrapper[4753]: I0129 12:07:48.495884 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:48Z","lastTransitionTime":"2026-01-29T12:07:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:48 crc kubenswrapper[4753]: I0129 12:07:48.598599 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:48 crc kubenswrapper[4753]: I0129 12:07:48.598671 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:48 crc kubenswrapper[4753]: I0129 12:07:48.598681 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:48 crc kubenswrapper[4753]: I0129 12:07:48.598699 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:48 crc kubenswrapper[4753]: I0129 12:07:48.598711 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:48Z","lastTransitionTime":"2026-01-29T12:07:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:48 crc kubenswrapper[4753]: I0129 12:07:48.651174 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-01 19:07:17.880244968 +0000 UTC Jan 29 12:07:48 crc kubenswrapper[4753]: I0129 12:07:48.701439 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:48 crc kubenswrapper[4753]: I0129 12:07:48.701479 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:48 crc kubenswrapper[4753]: I0129 12:07:48.701488 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:48 crc kubenswrapper[4753]: I0129 12:07:48.701505 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:48 crc kubenswrapper[4753]: I0129 12:07:48.701515 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:48Z","lastTransitionTime":"2026-01-29T12:07:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:48 crc kubenswrapper[4753]: I0129 12:07:48.804061 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:48 crc kubenswrapper[4753]: I0129 12:07:48.804097 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:48 crc kubenswrapper[4753]: I0129 12:07:48.804107 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:48 crc kubenswrapper[4753]: I0129 12:07:48.804122 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:48 crc kubenswrapper[4753]: I0129 12:07:48.804131 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:48Z","lastTransitionTime":"2026-01-29T12:07:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:48 crc kubenswrapper[4753]: I0129 12:07:48.887703 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 12:07:48 crc kubenswrapper[4753]: I0129 12:07:48.887761 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p6m5g" Jan 29 12:07:48 crc kubenswrapper[4753]: E0129 12:07:48.887883 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 12:07:48 crc kubenswrapper[4753]: I0129 12:07:48.887735 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 12:07:48 crc kubenswrapper[4753]: E0129 12:07:48.888025 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 12:07:48 crc kubenswrapper[4753]: E0129 12:07:48.888131 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p6m5g" podUID="2412fa08-e643-4225-b494-eb999ea93fce" Jan 29 12:07:48 crc kubenswrapper[4753]: I0129 12:07:48.906263 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:48 crc kubenswrapper[4753]: I0129 12:07:48.906320 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:48 crc kubenswrapper[4753]: I0129 12:07:48.906332 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:48 crc kubenswrapper[4753]: I0129 12:07:48.906350 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:48 crc kubenswrapper[4753]: I0129 12:07:48.906362 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:48Z","lastTransitionTime":"2026-01-29T12:07:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:49 crc kubenswrapper[4753]: I0129 12:07:49.009306 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:49 crc kubenswrapper[4753]: I0129 12:07:49.009358 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:49 crc kubenswrapper[4753]: I0129 12:07:49.009369 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:49 crc kubenswrapper[4753]: I0129 12:07:49.009387 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:49 crc kubenswrapper[4753]: I0129 12:07:49.009401 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:49Z","lastTransitionTime":"2026-01-29T12:07:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:49 crc kubenswrapper[4753]: I0129 12:07:49.112325 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:49 crc kubenswrapper[4753]: I0129 12:07:49.112360 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:49 crc kubenswrapper[4753]: I0129 12:07:49.112370 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:49 crc kubenswrapper[4753]: I0129 12:07:49.112389 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:49 crc kubenswrapper[4753]: I0129 12:07:49.112402 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:49Z","lastTransitionTime":"2026-01-29T12:07:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:49 crc kubenswrapper[4753]: I0129 12:07:49.215281 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:49 crc kubenswrapper[4753]: I0129 12:07:49.215590 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:49 crc kubenswrapper[4753]: I0129 12:07:49.215681 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:49 crc kubenswrapper[4753]: I0129 12:07:49.215776 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:49 crc kubenswrapper[4753]: I0129 12:07:49.215887 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:49Z","lastTransitionTime":"2026-01-29T12:07:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:49 crc kubenswrapper[4753]: I0129 12:07:49.318920 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:49 crc kubenswrapper[4753]: I0129 12:07:49.318998 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:49 crc kubenswrapper[4753]: I0129 12:07:49.319010 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:49 crc kubenswrapper[4753]: I0129 12:07:49.319028 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:49 crc kubenswrapper[4753]: I0129 12:07:49.319040 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:49Z","lastTransitionTime":"2026-01-29T12:07:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:49 crc kubenswrapper[4753]: I0129 12:07:49.422028 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:49 crc kubenswrapper[4753]: I0129 12:07:49.422081 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:49 crc kubenswrapper[4753]: I0129 12:07:49.422091 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:49 crc kubenswrapper[4753]: I0129 12:07:49.422107 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:49 crc kubenswrapper[4753]: I0129 12:07:49.422116 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:49Z","lastTransitionTime":"2026-01-29T12:07:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:49 crc kubenswrapper[4753]: I0129 12:07:49.524365 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:49 crc kubenswrapper[4753]: I0129 12:07:49.524400 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:49 crc kubenswrapper[4753]: I0129 12:07:49.524408 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:49 crc kubenswrapper[4753]: I0129 12:07:49.524423 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:49 crc kubenswrapper[4753]: I0129 12:07:49.524431 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:49Z","lastTransitionTime":"2026-01-29T12:07:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:49 crc kubenswrapper[4753]: I0129 12:07:49.627481 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:49 crc kubenswrapper[4753]: I0129 12:07:49.627519 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:49 crc kubenswrapper[4753]: I0129 12:07:49.627528 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:49 crc kubenswrapper[4753]: I0129 12:07:49.627543 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:49 crc kubenswrapper[4753]: I0129 12:07:49.627552 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:49Z","lastTransitionTime":"2026-01-29T12:07:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:49 crc kubenswrapper[4753]: I0129 12:07:49.651632 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-20 20:50:48.197633987 +0000 UTC Jan 29 12:07:49 crc kubenswrapper[4753]: I0129 12:07:49.729955 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:49 crc kubenswrapper[4753]: I0129 12:07:49.730022 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:49 crc kubenswrapper[4753]: I0129 12:07:49.730046 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:49 crc kubenswrapper[4753]: I0129 12:07:49.730081 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:49 crc kubenswrapper[4753]: I0129 12:07:49.730103 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:49Z","lastTransitionTime":"2026-01-29T12:07:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:49 crc kubenswrapper[4753]: I0129 12:07:49.833490 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:49 crc kubenswrapper[4753]: I0129 12:07:49.833532 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:49 crc kubenswrapper[4753]: I0129 12:07:49.833541 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:49 crc kubenswrapper[4753]: I0129 12:07:49.833556 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:49 crc kubenswrapper[4753]: I0129 12:07:49.833566 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:49Z","lastTransitionTime":"2026-01-29T12:07:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:49 crc kubenswrapper[4753]: I0129 12:07:49.888452 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 12:07:49 crc kubenswrapper[4753]: E0129 12:07:49.888760 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 12:07:49 crc kubenswrapper[4753]: I0129 12:07:49.917172 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Jan 29 12:07:49 crc kubenswrapper[4753]: I0129 12:07:49.937019 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:49 crc kubenswrapper[4753]: I0129 12:07:49.937128 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:49 crc kubenswrapper[4753]: I0129 12:07:49.937151 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:49 crc kubenswrapper[4753]: I0129 12:07:49.937176 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:49 crc kubenswrapper[4753]: I0129 12:07:49.937209 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:49Z","lastTransitionTime":"2026-01-29T12:07:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:50 crc kubenswrapper[4753]: I0129 12:07:50.039460 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:50 crc kubenswrapper[4753]: I0129 12:07:50.039497 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:50 crc kubenswrapper[4753]: I0129 12:07:50.039507 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:50 crc kubenswrapper[4753]: I0129 12:07:50.039525 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:50 crc kubenswrapper[4753]: I0129 12:07:50.039537 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:50Z","lastTransitionTime":"2026-01-29T12:07:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:50 crc kubenswrapper[4753]: I0129 12:07:50.142462 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:50 crc kubenswrapper[4753]: I0129 12:07:50.142507 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:50 crc kubenswrapper[4753]: I0129 12:07:50.142516 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:50 crc kubenswrapper[4753]: I0129 12:07:50.142532 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:50 crc kubenswrapper[4753]: I0129 12:07:50.142542 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:50Z","lastTransitionTime":"2026-01-29T12:07:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:50 crc kubenswrapper[4753]: I0129 12:07:50.246262 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:50 crc kubenswrapper[4753]: I0129 12:07:50.246316 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:50 crc kubenswrapper[4753]: I0129 12:07:50.246329 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:50 crc kubenswrapper[4753]: I0129 12:07:50.246352 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:50 crc kubenswrapper[4753]: I0129 12:07:50.246377 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:50Z","lastTransitionTime":"2026-01-29T12:07:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:50 crc kubenswrapper[4753]: I0129 12:07:50.349375 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:50 crc kubenswrapper[4753]: I0129 12:07:50.349410 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:50 crc kubenswrapper[4753]: I0129 12:07:50.349419 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:50 crc kubenswrapper[4753]: I0129 12:07:50.349437 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:50 crc kubenswrapper[4753]: I0129 12:07:50.349447 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:50Z","lastTransitionTime":"2026-01-29T12:07:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:50 crc kubenswrapper[4753]: I0129 12:07:50.452742 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:50 crc kubenswrapper[4753]: I0129 12:07:50.452795 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:50 crc kubenswrapper[4753]: I0129 12:07:50.452806 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:50 crc kubenswrapper[4753]: I0129 12:07:50.452825 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:50 crc kubenswrapper[4753]: I0129 12:07:50.452835 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:50Z","lastTransitionTime":"2026-01-29T12:07:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:50 crc kubenswrapper[4753]: I0129 12:07:50.555923 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:50 crc kubenswrapper[4753]: I0129 12:07:50.555999 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:50 crc kubenswrapper[4753]: I0129 12:07:50.556022 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:50 crc kubenswrapper[4753]: I0129 12:07:50.556055 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:50 crc kubenswrapper[4753]: I0129 12:07:50.556098 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:50Z","lastTransitionTime":"2026-01-29T12:07:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:50 crc kubenswrapper[4753]: I0129 12:07:50.652783 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-11 22:35:24.606700307 +0000 UTC Jan 29 12:07:50 crc kubenswrapper[4753]: I0129 12:07:50.658941 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:50 crc kubenswrapper[4753]: I0129 12:07:50.658997 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:50 crc kubenswrapper[4753]: I0129 12:07:50.659010 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:50 crc kubenswrapper[4753]: I0129 12:07:50.659034 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:50 crc kubenswrapper[4753]: I0129 12:07:50.659045 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:50Z","lastTransitionTime":"2026-01-29T12:07:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:50 crc kubenswrapper[4753]: I0129 12:07:50.761741 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:50 crc kubenswrapper[4753]: I0129 12:07:50.761827 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:50 crc kubenswrapper[4753]: I0129 12:07:50.761840 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:50 crc kubenswrapper[4753]: I0129 12:07:50.761860 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:50 crc kubenswrapper[4753]: I0129 12:07:50.761872 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:50Z","lastTransitionTime":"2026-01-29T12:07:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:50 crc kubenswrapper[4753]: I0129 12:07:50.868264 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:50 crc kubenswrapper[4753]: I0129 12:07:50.868307 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:50 crc kubenswrapper[4753]: I0129 12:07:50.868317 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:50 crc kubenswrapper[4753]: I0129 12:07:50.868339 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:50 crc kubenswrapper[4753]: I0129 12:07:50.868353 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:50Z","lastTransitionTime":"2026-01-29T12:07:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:50 crc kubenswrapper[4753]: I0129 12:07:50.887621 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 12:07:50 crc kubenswrapper[4753]: E0129 12:07:50.887836 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 12:07:50 crc kubenswrapper[4753]: I0129 12:07:50.887621 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 12:07:50 crc kubenswrapper[4753]: I0129 12:07:50.887657 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p6m5g" Jan 29 12:07:50 crc kubenswrapper[4753]: E0129 12:07:50.887939 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p6m5g" podUID="2412fa08-e643-4225-b494-eb999ea93fce" Jan 29 12:07:50 crc kubenswrapper[4753]: E0129 12:07:50.888640 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 12:07:50 crc kubenswrapper[4753]: I0129 12:07:50.971004 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:50 crc kubenswrapper[4753]: I0129 12:07:50.971063 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:50 crc kubenswrapper[4753]: I0129 12:07:50.971074 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:50 crc kubenswrapper[4753]: I0129 12:07:50.971093 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:50 crc kubenswrapper[4753]: I0129 12:07:50.971106 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:50Z","lastTransitionTime":"2026-01-29T12:07:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:51 crc kubenswrapper[4753]: I0129 12:07:51.074495 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:51 crc kubenswrapper[4753]: I0129 12:07:51.074564 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:51 crc kubenswrapper[4753]: I0129 12:07:51.074590 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:51 crc kubenswrapper[4753]: I0129 12:07:51.074607 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:51 crc kubenswrapper[4753]: I0129 12:07:51.074617 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:51Z","lastTransitionTime":"2026-01-29T12:07:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:51 crc kubenswrapper[4753]: I0129 12:07:51.178842 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:51 crc kubenswrapper[4753]: I0129 12:07:51.178909 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:51 crc kubenswrapper[4753]: I0129 12:07:51.178930 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:51 crc kubenswrapper[4753]: I0129 12:07:51.178960 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:51 crc kubenswrapper[4753]: I0129 12:07:51.178976 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:51Z","lastTransitionTime":"2026-01-29T12:07:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:51 crc kubenswrapper[4753]: I0129 12:07:51.282165 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:51 crc kubenswrapper[4753]: I0129 12:07:51.282214 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:51 crc kubenswrapper[4753]: I0129 12:07:51.282249 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:51 crc kubenswrapper[4753]: I0129 12:07:51.282268 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:51 crc kubenswrapper[4753]: I0129 12:07:51.282279 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:51Z","lastTransitionTime":"2026-01-29T12:07:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:51 crc kubenswrapper[4753]: I0129 12:07:51.385450 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:51 crc kubenswrapper[4753]: I0129 12:07:51.385482 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:51 crc kubenswrapper[4753]: I0129 12:07:51.385491 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:51 crc kubenswrapper[4753]: I0129 12:07:51.385506 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:51 crc kubenswrapper[4753]: I0129 12:07:51.385515 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:51Z","lastTransitionTime":"2026-01-29T12:07:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:51 crc kubenswrapper[4753]: I0129 12:07:51.488472 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:51 crc kubenswrapper[4753]: I0129 12:07:51.488549 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:51 crc kubenswrapper[4753]: I0129 12:07:51.488563 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:51 crc kubenswrapper[4753]: I0129 12:07:51.488584 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:51 crc kubenswrapper[4753]: I0129 12:07:51.488596 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:51Z","lastTransitionTime":"2026-01-29T12:07:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:51 crc kubenswrapper[4753]: I0129 12:07:51.592484 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:51 crc kubenswrapper[4753]: I0129 12:07:51.592549 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:51 crc kubenswrapper[4753]: I0129 12:07:51.592560 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:51 crc kubenswrapper[4753]: I0129 12:07:51.592583 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:51 crc kubenswrapper[4753]: I0129 12:07:51.592599 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:51Z","lastTransitionTime":"2026-01-29T12:07:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:51 crc kubenswrapper[4753]: I0129 12:07:51.654008 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-16 14:01:40.39465589 +0000 UTC Jan 29 12:07:51 crc kubenswrapper[4753]: I0129 12:07:51.695396 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:51 crc kubenswrapper[4753]: I0129 12:07:51.695441 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:51 crc kubenswrapper[4753]: I0129 12:07:51.695449 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:51 crc kubenswrapper[4753]: I0129 12:07:51.695465 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:51 crc kubenswrapper[4753]: I0129 12:07:51.695476 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:51Z","lastTransitionTime":"2026-01-29T12:07:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:51 crc kubenswrapper[4753]: I0129 12:07:51.797587 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:51 crc kubenswrapper[4753]: I0129 12:07:51.797682 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:51 crc kubenswrapper[4753]: I0129 12:07:51.797696 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:51 crc kubenswrapper[4753]: I0129 12:07:51.797717 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:51 crc kubenswrapper[4753]: I0129 12:07:51.797747 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:51Z","lastTransitionTime":"2026-01-29T12:07:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:51 crc kubenswrapper[4753]: I0129 12:07:51.887877 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 12:07:51 crc kubenswrapper[4753]: E0129 12:07:51.888090 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 12:07:51 crc kubenswrapper[4753]: I0129 12:07:51.900772 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:51 crc kubenswrapper[4753]: I0129 12:07:51.900822 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:51 crc kubenswrapper[4753]: I0129 12:07:51.900833 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:51 crc kubenswrapper[4753]: I0129 12:07:51.900853 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:51 crc kubenswrapper[4753]: I0129 12:07:51.900868 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:51Z","lastTransitionTime":"2026-01-29T12:07:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:52 crc kubenswrapper[4753]: I0129 12:07:52.003509 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:52 crc kubenswrapper[4753]: I0129 12:07:52.003550 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:52 crc kubenswrapper[4753]: I0129 12:07:52.003560 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:52 crc kubenswrapper[4753]: I0129 12:07:52.003575 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:52 crc kubenswrapper[4753]: I0129 12:07:52.003583 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:52Z","lastTransitionTime":"2026-01-29T12:07:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:52 crc kubenswrapper[4753]: I0129 12:07:52.106647 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:52 crc kubenswrapper[4753]: I0129 12:07:52.106710 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:52 crc kubenswrapper[4753]: I0129 12:07:52.106719 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:52 crc kubenswrapper[4753]: I0129 12:07:52.106738 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:52 crc kubenswrapper[4753]: I0129 12:07:52.106749 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:52Z","lastTransitionTime":"2026-01-29T12:07:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:52 crc kubenswrapper[4753]: I0129 12:07:52.210158 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:52 crc kubenswrapper[4753]: I0129 12:07:52.210216 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:52 crc kubenswrapper[4753]: I0129 12:07:52.210252 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:52 crc kubenswrapper[4753]: I0129 12:07:52.210274 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:52 crc kubenswrapper[4753]: I0129 12:07:52.210289 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:52Z","lastTransitionTime":"2026-01-29T12:07:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:52 crc kubenswrapper[4753]: I0129 12:07:52.313764 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:52 crc kubenswrapper[4753]: I0129 12:07:52.313834 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:52 crc kubenswrapper[4753]: I0129 12:07:52.313853 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:52 crc kubenswrapper[4753]: I0129 12:07:52.313879 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:52 crc kubenswrapper[4753]: I0129 12:07:52.313897 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:52Z","lastTransitionTime":"2026-01-29T12:07:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:52 crc kubenswrapper[4753]: I0129 12:07:52.417365 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:52 crc kubenswrapper[4753]: I0129 12:07:52.417427 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:52 crc kubenswrapper[4753]: I0129 12:07:52.417438 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:52 crc kubenswrapper[4753]: I0129 12:07:52.417458 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:52 crc kubenswrapper[4753]: I0129 12:07:52.417471 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:52Z","lastTransitionTime":"2026-01-29T12:07:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:52 crc kubenswrapper[4753]: I0129 12:07:52.519935 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:52 crc kubenswrapper[4753]: I0129 12:07:52.519977 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:52 crc kubenswrapper[4753]: I0129 12:07:52.519986 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:52 crc kubenswrapper[4753]: I0129 12:07:52.520002 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:52 crc kubenswrapper[4753]: I0129 12:07:52.520011 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:52Z","lastTransitionTime":"2026-01-29T12:07:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:52 crc kubenswrapper[4753]: I0129 12:07:52.622621 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:52 crc kubenswrapper[4753]: I0129 12:07:52.622676 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:52 crc kubenswrapper[4753]: I0129 12:07:52.622686 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:52 crc kubenswrapper[4753]: I0129 12:07:52.622703 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:52 crc kubenswrapper[4753]: I0129 12:07:52.622714 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:52Z","lastTransitionTime":"2026-01-29T12:07:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:52 crc kubenswrapper[4753]: I0129 12:07:52.655250 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-31 16:29:28.935995382 +0000 UTC Jan 29 12:07:52 crc kubenswrapper[4753]: I0129 12:07:52.725562 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:52 crc kubenswrapper[4753]: I0129 12:07:52.725598 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:52 crc kubenswrapper[4753]: I0129 12:07:52.725607 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:52 crc kubenswrapper[4753]: I0129 12:07:52.725624 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:52 crc kubenswrapper[4753]: I0129 12:07:52.725634 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:52Z","lastTransitionTime":"2026-01-29T12:07:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:52 crc kubenswrapper[4753]: I0129 12:07:52.829701 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:52 crc kubenswrapper[4753]: I0129 12:07:52.829760 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:52 crc kubenswrapper[4753]: I0129 12:07:52.829773 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:52 crc kubenswrapper[4753]: I0129 12:07:52.829795 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:52 crc kubenswrapper[4753]: I0129 12:07:52.829816 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:52Z","lastTransitionTime":"2026-01-29T12:07:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:52 crc kubenswrapper[4753]: I0129 12:07:52.888273 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p6m5g" Jan 29 12:07:52 crc kubenswrapper[4753]: I0129 12:07:52.888337 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 12:07:52 crc kubenswrapper[4753]: I0129 12:07:52.888287 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 12:07:52 crc kubenswrapper[4753]: E0129 12:07:52.888469 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p6m5g" podUID="2412fa08-e643-4225-b494-eb999ea93fce" Jan 29 12:07:52 crc kubenswrapper[4753]: E0129 12:07:52.888685 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 12:07:52 crc kubenswrapper[4753]: E0129 12:07:52.888838 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 12:07:52 crc kubenswrapper[4753]: I0129 12:07:52.936492 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:52 crc kubenswrapper[4753]: I0129 12:07:52.936574 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:52 crc kubenswrapper[4753]: I0129 12:07:52.936597 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:52 crc kubenswrapper[4753]: I0129 12:07:52.937080 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:52 crc kubenswrapper[4753]: I0129 12:07:52.937107 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:52Z","lastTransitionTime":"2026-01-29T12:07:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:53 crc kubenswrapper[4753]: I0129 12:07:53.040934 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:53 crc kubenswrapper[4753]: I0129 12:07:53.040979 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:53 crc kubenswrapper[4753]: I0129 12:07:53.040987 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:53 crc kubenswrapper[4753]: I0129 12:07:53.041003 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:53 crc kubenswrapper[4753]: I0129 12:07:53.041011 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:53Z","lastTransitionTime":"2026-01-29T12:07:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:53 crc kubenswrapper[4753]: I0129 12:07:53.144142 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:53 crc kubenswrapper[4753]: I0129 12:07:53.144198 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:53 crc kubenswrapper[4753]: I0129 12:07:53.144211 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:53 crc kubenswrapper[4753]: I0129 12:07:53.144265 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:53 crc kubenswrapper[4753]: I0129 12:07:53.144281 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:53Z","lastTransitionTime":"2026-01-29T12:07:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:53 crc kubenswrapper[4753]: I0129 12:07:53.251030 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:53 crc kubenswrapper[4753]: I0129 12:07:53.251071 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:53 crc kubenswrapper[4753]: I0129 12:07:53.251085 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:53 crc kubenswrapper[4753]: I0129 12:07:53.251122 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:53 crc kubenswrapper[4753]: I0129 12:07:53.251132 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:53Z","lastTransitionTime":"2026-01-29T12:07:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:53 crc kubenswrapper[4753]: I0129 12:07:53.355678 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:53 crc kubenswrapper[4753]: I0129 12:07:53.355753 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:53 crc kubenswrapper[4753]: I0129 12:07:53.355776 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:53 crc kubenswrapper[4753]: I0129 12:07:53.355811 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:53 crc kubenswrapper[4753]: I0129 12:07:53.355835 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:53Z","lastTransitionTime":"2026-01-29T12:07:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:53 crc kubenswrapper[4753]: I0129 12:07:53.465936 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:53 crc kubenswrapper[4753]: I0129 12:07:53.466027 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:53 crc kubenswrapper[4753]: I0129 12:07:53.466056 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:53 crc kubenswrapper[4753]: I0129 12:07:53.466092 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:53 crc kubenswrapper[4753]: I0129 12:07:53.466116 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:53Z","lastTransitionTime":"2026-01-29T12:07:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:53 crc kubenswrapper[4753]: I0129 12:07:53.568722 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:53 crc kubenswrapper[4753]: I0129 12:07:53.569112 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:53 crc kubenswrapper[4753]: I0129 12:07:53.569253 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:53 crc kubenswrapper[4753]: I0129 12:07:53.569409 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:53 crc kubenswrapper[4753]: I0129 12:07:53.569571 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:53Z","lastTransitionTime":"2026-01-29T12:07:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:53 crc kubenswrapper[4753]: I0129 12:07:53.656028 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-02 12:05:53.010828136 +0000 UTC Jan 29 12:07:53 crc kubenswrapper[4753]: I0129 12:07:53.673865 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:53 crc kubenswrapper[4753]: I0129 12:07:53.674314 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:53 crc kubenswrapper[4753]: I0129 12:07:53.674403 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:53 crc kubenswrapper[4753]: I0129 12:07:53.674536 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:53 crc kubenswrapper[4753]: I0129 12:07:53.674611 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:53Z","lastTransitionTime":"2026-01-29T12:07:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:53 crc kubenswrapper[4753]: I0129 12:07:53.777568 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:53 crc kubenswrapper[4753]: I0129 12:07:53.777624 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:53 crc kubenswrapper[4753]: I0129 12:07:53.777636 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:53 crc kubenswrapper[4753]: I0129 12:07:53.777658 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:53 crc kubenswrapper[4753]: I0129 12:07:53.777668 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:53Z","lastTransitionTime":"2026-01-29T12:07:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:53 crc kubenswrapper[4753]: I0129 12:07:53.880867 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:53 crc kubenswrapper[4753]: I0129 12:07:53.880969 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:53 crc kubenswrapper[4753]: I0129 12:07:53.881001 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:53 crc kubenswrapper[4753]: I0129 12:07:53.881040 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:53 crc kubenswrapper[4753]: I0129 12:07:53.881080 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:53Z","lastTransitionTime":"2026-01-29T12:07:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:53 crc kubenswrapper[4753]: I0129 12:07:53.888269 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 12:07:53 crc kubenswrapper[4753]: E0129 12:07:53.888520 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 12:07:53 crc kubenswrapper[4753]: I0129 12:07:53.984159 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:53 crc kubenswrapper[4753]: I0129 12:07:53.984238 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:53 crc kubenswrapper[4753]: I0129 12:07:53.984248 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:53 crc kubenswrapper[4753]: I0129 12:07:53.984265 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:53 crc kubenswrapper[4753]: I0129 12:07:53.984276 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:53Z","lastTransitionTime":"2026-01-29T12:07:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.087656 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.087712 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.087729 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.087752 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.087767 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:54Z","lastTransitionTime":"2026-01-29T12:07:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.190626 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.191191 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.191282 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.191457 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.191526 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:54Z","lastTransitionTime":"2026-01-29T12:07:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.293898 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.293946 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.293955 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.293974 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.293985 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:54Z","lastTransitionTime":"2026-01-29T12:07:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.325101 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.325152 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.325170 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.325193 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.325207 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:54Z","lastTransitionTime":"2026-01-29T12:07:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:54 crc kubenswrapper[4753]: E0129 12:07:54.344349 4753 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:54Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:54Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:54Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:54Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:54Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:54Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7536a030-b66d-4df9-a1ec-9890f7ad99e7\\\",\\\"systemUUID\\\":\\\"ce89c1ce-259c-4b78-b348-3ef96afb6944\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:54Z is after 
2025-08-24T17:21:41Z" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.350578 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.350660 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.350686 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.350721 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.350739 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:54Z","lastTransitionTime":"2026-01-29T12:07:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:54 crc kubenswrapper[4753]: E0129 12:07:54.375343 4753 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:54Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:54Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:54Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:54Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:54Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:54Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7536a030-b66d-4df9-a1ec-9890f7ad99e7\\\",\\\"systemUUID\\\":\\\"ce89c1ce-259c-4b78-b348-3ef96afb6944\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:54Z is after 
2025-08-24T17:21:41Z" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.380871 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.380955 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.380980 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.381013 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.381032 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:54Z","lastTransitionTime":"2026-01-29T12:07:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:54 crc kubenswrapper[4753]: E0129 12:07:54.403289 4753 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:54Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:54Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:54Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:54Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:54Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:54Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}], ... [image list, nodeInfo, and runtimeHandlers byte-for-byte identical to the previous attempt above; elided] ...}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:54Z is after 
2025-08-24T17:21:41Z" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.407307 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.407370 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.407386 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.407409 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.407424 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:54Z","lastTransitionTime":"2026-01-29T12:07:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:54 crc kubenswrapper[4753]: E0129 12:07:54.422332 4753 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:54Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:54Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:54Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:54Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:54Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:54Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}], ... [image list, nodeInfo, and runtimeHandlers byte-for-byte identical to the first full payload above; elided] ...}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:54Z is after 
2025-08-24T17:21:41Z" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.426796 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.426848 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.426863 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.426892 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.426904 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:54Z","lastTransitionTime":"2026-01-29T12:07:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:54 crc kubenswrapper[4753]: E0129 12:07:54.443834 4753 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:54Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:54Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:54Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:54Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:07:54Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:54Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}], ... [image list, nodeInfo, and runtimeHandlers byte-for-byte identical to the first full payload above; elided] ...}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:54Z is after 
2025-08-24T17:21:41Z" Jan 29 12:07:54 crc kubenswrapper[4753]: E0129 12:07:54.444026 4753 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.446279 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.446338 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.446351 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.446376 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.446390 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:54Z","lastTransitionTime":"2026-01-29T12:07:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.550257 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.550311 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.550323 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.550341 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.550352 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:54Z","lastTransitionTime":"2026-01-29T12:07:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.653587 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.653705 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.653728 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.653757 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.653776 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:54Z","lastTransitionTime":"2026-01-29T12:07:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.656813 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-24 10:42:35.868879899 +0000 UTC Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.756329 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.756533 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.756559 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.756585 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.756603 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:54Z","lastTransitionTime":"2026-01-29T12:07:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.858905 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.858969 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.858995 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.859027 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.859049 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:54Z","lastTransitionTime":"2026-01-29T12:07:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.888341 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p6m5g" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.888518 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.889139 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 12:07:54 crc kubenswrapper[4753]: E0129 12:07:54.889493 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p6m5g" podUID="2412fa08-e643-4225-b494-eb999ea93fce" Jan 29 12:07:54 crc kubenswrapper[4753]: E0129 12:07:54.889890 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 12:07:54 crc kubenswrapper[4753]: E0129 12:07:54.890198 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.962571 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.962622 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.962632 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.962648 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:54 crc kubenswrapper[4753]: I0129 12:07:54.962658 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:54Z","lastTransitionTime":"2026-01-29T12:07:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:55 crc kubenswrapper[4753]: I0129 12:07:55.065985 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:55 crc kubenswrapper[4753]: I0129 12:07:55.066054 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:55 crc kubenswrapper[4753]: I0129 12:07:55.066070 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:55 crc kubenswrapper[4753]: I0129 12:07:55.066091 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:55 crc kubenswrapper[4753]: I0129 12:07:55.066105 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:55Z","lastTransitionTime":"2026-01-29T12:07:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:55 crc kubenswrapper[4753]: I0129 12:07:55.149868 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2412fa08-e643-4225-b494-eb999ea93fce-metrics-certs\") pod \"network-metrics-daemon-p6m5g\" (UID: \"2412fa08-e643-4225-b494-eb999ea93fce\") " pod="openshift-multus/network-metrics-daemon-p6m5g" Jan 29 12:07:55 crc kubenswrapper[4753]: E0129 12:07:55.150156 4753 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 29 12:07:55 crc kubenswrapper[4753]: E0129 12:07:55.150418 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2412fa08-e643-4225-b494-eb999ea93fce-metrics-certs podName:2412fa08-e643-4225-b494-eb999ea93fce nodeName:}" failed. No retries permitted until 2026-01-29 12:08:27.15033961 +0000 UTC m=+121.402421105 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/2412fa08-e643-4225-b494-eb999ea93fce-metrics-certs") pod "network-metrics-daemon-p6m5g" (UID: "2412fa08-e643-4225-b494-eb999ea93fce") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 29 12:07:55 crc kubenswrapper[4753]: I0129 12:07:55.169404 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:55 crc kubenswrapper[4753]: I0129 12:07:55.169483 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:55 crc kubenswrapper[4753]: I0129 12:07:55.169503 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:55 crc kubenswrapper[4753]: I0129 12:07:55.169529 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:55 crc kubenswrapper[4753]: I0129 12:07:55.169547 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:55Z","lastTransitionTime":"2026-01-29T12:07:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:55 crc kubenswrapper[4753]: I0129 12:07:55.273686 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:55 crc kubenswrapper[4753]: I0129 12:07:55.273735 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:55 crc kubenswrapper[4753]: I0129 12:07:55.273747 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:55 crc kubenswrapper[4753]: I0129 12:07:55.273767 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:55 crc kubenswrapper[4753]: I0129 12:07:55.273780 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:55Z","lastTransitionTime":"2026-01-29T12:07:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:55 crc kubenswrapper[4753]: I0129 12:07:55.376999 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:55 crc kubenswrapper[4753]: I0129 12:07:55.377169 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:55 crc kubenswrapper[4753]: I0129 12:07:55.377192 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:55 crc kubenswrapper[4753]: I0129 12:07:55.377400 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:55 crc kubenswrapper[4753]: I0129 12:07:55.377444 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:55Z","lastTransitionTime":"2026-01-29T12:07:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:55 crc kubenswrapper[4753]: I0129 12:07:55.480538 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:55 crc kubenswrapper[4753]: I0129 12:07:55.480600 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:55 crc kubenswrapper[4753]: I0129 12:07:55.480617 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:55 crc kubenswrapper[4753]: I0129 12:07:55.480641 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:55 crc kubenswrapper[4753]: I0129 12:07:55.480657 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:55Z","lastTransitionTime":"2026-01-29T12:07:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:55 crc kubenswrapper[4753]: I0129 12:07:55.584100 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:55 crc kubenswrapper[4753]: I0129 12:07:55.584134 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:55 crc kubenswrapper[4753]: I0129 12:07:55.584143 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:55 crc kubenswrapper[4753]: I0129 12:07:55.584158 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:55 crc kubenswrapper[4753]: I0129 12:07:55.584168 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:55Z","lastTransitionTime":"2026-01-29T12:07:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:55 crc kubenswrapper[4753]: I0129 12:07:55.657943 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-17 23:18:04.384449718 +0000 UTC Jan 29 12:07:55 crc kubenswrapper[4753]: I0129 12:07:55.688842 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:55 crc kubenswrapper[4753]: I0129 12:07:55.688877 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:55 crc kubenswrapper[4753]: I0129 12:07:55.688886 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:55 crc kubenswrapper[4753]: I0129 12:07:55.688901 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:55 crc kubenswrapper[4753]: I0129 12:07:55.688911 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:55Z","lastTransitionTime":"2026-01-29T12:07:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:55 crc kubenswrapper[4753]: I0129 12:07:55.791707 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:55 crc kubenswrapper[4753]: I0129 12:07:55.791772 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:55 crc kubenswrapper[4753]: I0129 12:07:55.791783 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:55 crc kubenswrapper[4753]: I0129 12:07:55.791806 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:55 crc kubenswrapper[4753]: I0129 12:07:55.791906 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:55Z","lastTransitionTime":"2026-01-29T12:07:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:55 crc kubenswrapper[4753]: I0129 12:07:55.888388 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 12:07:55 crc kubenswrapper[4753]: E0129 12:07:55.888584 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 12:07:55 crc kubenswrapper[4753]: I0129 12:07:55.894075 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:55 crc kubenswrapper[4753]: I0129 12:07:55.894112 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:55 crc kubenswrapper[4753]: I0129 12:07:55.894124 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:55 crc kubenswrapper[4753]: I0129 12:07:55.894137 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:55 crc kubenswrapper[4753]: I0129 12:07:55.894148 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:55Z","lastTransitionTime":"2026-01-29T12:07:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:55 crc kubenswrapper[4753]: I0129 12:07:55.996562 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:55 crc kubenswrapper[4753]: I0129 12:07:55.996616 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:55 crc kubenswrapper[4753]: I0129 12:07:55.996628 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:55 crc kubenswrapper[4753]: I0129 12:07:55.996649 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:55 crc kubenswrapper[4753]: I0129 12:07:55.996662 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:55Z","lastTransitionTime":"2026-01-29T12:07:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:56 crc kubenswrapper[4753]: I0129 12:07:56.099992 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:56 crc kubenswrapper[4753]: I0129 12:07:56.100038 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:56 crc kubenswrapper[4753]: I0129 12:07:56.100049 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:56 crc kubenswrapper[4753]: I0129 12:07:56.100066 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:56 crc kubenswrapper[4753]: I0129 12:07:56.100076 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:56Z","lastTransitionTime":"2026-01-29T12:07:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:56 crc kubenswrapper[4753]: I0129 12:07:56.203472 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:56 crc kubenswrapper[4753]: I0129 12:07:56.203519 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:56 crc kubenswrapper[4753]: I0129 12:07:56.203531 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:56 crc kubenswrapper[4753]: I0129 12:07:56.203551 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:56 crc kubenswrapper[4753]: I0129 12:07:56.203565 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:56Z","lastTransitionTime":"2026-01-29T12:07:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:56 crc kubenswrapper[4753]: I0129 12:07:56.306145 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:56 crc kubenswrapper[4753]: I0129 12:07:56.306201 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:56 crc kubenswrapper[4753]: I0129 12:07:56.306214 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:56 crc kubenswrapper[4753]: I0129 12:07:56.306253 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:56 crc kubenswrapper[4753]: I0129 12:07:56.306287 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:56Z","lastTransitionTime":"2026-01-29T12:07:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:56 crc kubenswrapper[4753]: I0129 12:07:56.409279 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:56 crc kubenswrapper[4753]: I0129 12:07:56.409335 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:56 crc kubenswrapper[4753]: I0129 12:07:56.409348 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:56 crc kubenswrapper[4753]: I0129 12:07:56.409369 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:56 crc kubenswrapper[4753]: I0129 12:07:56.409382 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:56Z","lastTransitionTime":"2026-01-29T12:07:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:56 crc kubenswrapper[4753]: I0129 12:07:56.512171 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:56 crc kubenswrapper[4753]: I0129 12:07:56.512214 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:56 crc kubenswrapper[4753]: I0129 12:07:56.512237 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:56 crc kubenswrapper[4753]: I0129 12:07:56.512255 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:56 crc kubenswrapper[4753]: I0129 12:07:56.512265 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:56Z","lastTransitionTime":"2026-01-29T12:07:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:56 crc kubenswrapper[4753]: I0129 12:07:56.614874 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:56 crc kubenswrapper[4753]: I0129 12:07:56.614969 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:56 crc kubenswrapper[4753]: I0129 12:07:56.614981 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:56 crc kubenswrapper[4753]: I0129 12:07:56.615003 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:56 crc kubenswrapper[4753]: I0129 12:07:56.615016 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:56Z","lastTransitionTime":"2026-01-29T12:07:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:56 crc kubenswrapper[4753]: I0129 12:07:56.659185 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-18 08:16:12.906455787 +0000 UTC Jan 29 12:07:56 crc kubenswrapper[4753]: I0129 12:07:56.717944 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:56 crc kubenswrapper[4753]: I0129 12:07:56.718025 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:56 crc kubenswrapper[4753]: I0129 12:07:56.718054 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:56 crc kubenswrapper[4753]: I0129 12:07:56.718072 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:56 crc kubenswrapper[4753]: I0129 12:07:56.718080 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:56Z","lastTransitionTime":"2026-01-29T12:07:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:56 crc kubenswrapper[4753]: I0129 12:07:56.820502 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:56 crc kubenswrapper[4753]: I0129 12:07:56.820564 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:56 crc kubenswrapper[4753]: I0129 12:07:56.820577 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:56 crc kubenswrapper[4753]: I0129 12:07:56.820636 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:56 crc kubenswrapper[4753]: I0129 12:07:56.820651 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:56Z","lastTransitionTime":"2026-01-29T12:07:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:56 crc kubenswrapper[4753]: I0129 12:07:56.888425 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 12:07:56 crc kubenswrapper[4753]: I0129 12:07:56.888745 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 12:07:56 crc kubenswrapper[4753]: E0129 12:07:56.888744 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 12:07:56 crc kubenswrapper[4753]: I0129 12:07:56.888810 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p6m5g" Jan 29 12:07:56 crc kubenswrapper[4753]: E0129 12:07:56.889324 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 12:07:56 crc kubenswrapper[4753]: E0129 12:07:56.889399 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p6m5g" podUID="2412fa08-e643-4225-b494-eb999ea93fce" Jan 29 12:07:56 crc kubenswrapper[4753]: I0129 12:07:56.890072 4753 scope.go:117] "RemoveContainer" containerID="e7138397dae5e90b30b0cdec02d29c108d423b3c94b07ea63e1560c4f02ed607" Jan 29 12:07:56 crc kubenswrapper[4753]: E0129 12:07:56.890463 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-nzkvz_openshift-ovn-kubernetes(80bec2ab-0a88-4818-9339-760edda3b07e)\"" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" podUID="80bec2ab-0a88-4818-9339-760edda3b07e" Jan 29 12:07:56 crc kubenswrapper[4753]: I0129 12:07:56.907890 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d7ccf10988fd83d31d42be6f82403067a0cc50313c4d5dcddfb528507efffa4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:56Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:56 crc kubenswrapper[4753]: I0129 12:07:56.923613 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:56 crc kubenswrapper[4753]: I0129 12:07:56.923666 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:56 crc kubenswrapper[4753]: I0129 12:07:56.923679 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:56 crc kubenswrapper[4753]: I0129 12:07:56.923700 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:56 crc kubenswrapper[4753]: I0129 12:07:56.923714 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:56Z","lastTransitionTime":"2026-01-29T12:07:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:56 crc kubenswrapper[4753]: I0129 12:07:56.929827 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80bec2ab-0a88-4818-9339-760edda3b07e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f15016e29f1fc14f51b4d01e772071333fd8a885cacc1804b3a03dbcfc93836\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e6a7d8df812546f17b3e026005c5062086e97e3fa364b3fa22cefec025e4eba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://f71859413676dab4a435d43c11ad7cba315a12de744c3c8b161273f6ad36580c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0ec2a21ec299b5eed363668f39a270a8b03ea79b9de3dd23bda0c30579c3de3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91c1df8b6763320498420463a3841f0bfa4fcf753c355f549c3c1e93bf5206a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://05d1e99596530f467ecdae51aec8db191ac045cd85e121fc9835e2f3688f6ae7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7138397dae5e90b30b0cdec02d29c108d423b3c94b07ea63e1560c4f02ed607\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e7138397dae5e90b30b0cdec02d29c108d423b3c94b07ea63e1560c4f02ed607\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T12:07:43Z\\\",\\\"message\\\":\\\"rnalversions/factory.go:140\\\\nI0129 12:07:43.077687 6477 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 12:07:43.077875 6477 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0129 12:07:43.078075 6477 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 12:07:43.078370 6477 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 12:07:43.078709 6477 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0129 12:07:43.078725 6477 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0129 12:07:43.078784 6477 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0129 12:07:43.078798 6477 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0129 12:07:43.078806 6477 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0129 12:07:43.078847 6477 factory.go:656] Stopping watch factory\\\\nI0129 12:07:43.078862 6477 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0129 12:07:43.078866 6477 ovnkube.go:599] Stopped ovnkube\\\\nI0129 12:07:43.078876 6477 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0129 12\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:41Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-nzkvz_openshift-ovn-kubernetes(80bec2ab-0a88-4818-9339-760edda3b07e)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e40e5b13dfdc135ea09253d466bacdfc5c2ac3d28eabc76d833d71ac300905c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-nzkvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:56Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:56 crc kubenswrapper[4753]: I0129 12:07:56.943003 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5m2jf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0203345d-1e9f-4cfe-bde7-90f87221d1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fdf3d79084d3e17b1f8cb0b285fd425280f34724c6a441104a2ec52b5217bfdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zzwn6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":
[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5m2jf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:56Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:56 crc kubenswrapper[4753]: I0129 12:07:56.957722 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-p6m5g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2412fa08-e643-4225-b494-eb999ea93fce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjjxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjjxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:23Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-p6m5g\": Internal error occurred: 
failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:56Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:56 crc kubenswrapper[4753]: I0129 12:07:56.971924 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c93f64b-e5af-4d6b-83e9-7fdfeb8548ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a9ec3e416b3b385c71b2fa28c0215fcb04b708e33fec2d85fdf0a75b848b027\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4fd3a920ea0c98edb2c8ad37d2a1a5d1ed50de5bc8c3e2f4d34c876b2cab54f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8dea62586ba5ac5a8ddc9e75b216a27593c8fe99516abc38c9c411a93842a372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"runnin
g\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://44c03bba5a02d879c3b235cd65897fb30651871689c8f02b9526966817f79636\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44c03bba5a02d879c3b235cd65897fb30651871689c8f02b9526966817f79636\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:56Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:56 crc kubenswrapper[4753]: I0129 12:07:56.986373 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7962d88e183df5c4d91f5767f2007068ea2fdfd5c31d9d6ed609c2ab9ca26999\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:56Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.002254 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:56Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.014519 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x8kzr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d753320a-61c2-4c0e-bd48-96d74b352114\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c7a667b7243fc36ff18cb4c017152fff3d5f4eb5e28e306ace44420cd722c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tgns4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x8kzr\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:57Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.027054 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.027115 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.027128 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.027152 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.027164 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:57Z","lastTransitionTime":"2026-01-29T12:07:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.036662 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vwcjk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e921d361518102483679b5ff7387e27858e315d637efb0f18e06215fe00f70a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerSta
tuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8b1c7640bd4881ba79f3471e309b2d1538aea8d0c8e01d3d7cfc3677d95725a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8b1c7640bd4881ba79f3471e309b2d1538aea8d0c8e01d3d7cfc3677d95725a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:
07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0ba51b6e2180da460faa3599850175db9d7410880ba0214425fa275803f36d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0ba51b6e2180da460faa3599850175db9d7410880ba0214425fa275803f36d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ed771b481178d6e12c669e122932ef25e1fe09db24c8f025f50a469400af184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ed771b481178d6e12c669e122932ef25e1fe09db24c8f025f50a469400af184\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1990423fefacfb9b390af88b9af343c1bb4c02014db6bf8226918a7be9f4ad5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-c
ni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1990423fefacfb9b390af88b9af343c1bb4c02014db6bf8226918a7be9f4ad5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vwcjk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:57Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.051910 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"acf2e7cc-e85d-43b0-84c4-7d2de575457f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://308d58d7f5a84b4d1980716b231abfe2ea2e9355bff9f5c998584d6986a9a389\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa000ea086031e13fdded4648ef18460b2192b270143a3866a3f448d5a006eb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/opens
hift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa000ea086031e13fdded4648ef18460b2192b270143a3866a3f448d5a006eb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:57Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.072665 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"703a6b9d-a15d-4ed3-b00b-db7bd5d42c61\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cab3ab80a8fddbc591cd07fdbed800f525bb8a1b5505bf818ebb59cbcce73003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7af8be0836a8ff7d08ff48d9487acdedd4b83127538d071d13b3215400c0f8de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\
"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d57a9cede640bdcec325175598b72dd00398436b730248728c7d9afd545b1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e86597c91848f265cd70410d57ac60eadba1d0322b9b897097366a41c93d8134\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T12:06:54Z\\\",\\\"message\\\":\\\"ed a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\"\\\\nI0129 12:06:54.190978 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 12:06:54.198044 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 12:06:54.198078 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 12:06:54.198139 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 12:06:54.198147 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 12:06:54.208089 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 12:06:54.208122 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208128 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208132 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 12:06:54.208135 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 12:06:54.208139 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 12:06:54.208143 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 12:06:54.208164 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0129 12:06:54.212380 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" 
certName=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769688397\\\\\\\\\\\\\\\" (2026-01-29 12:06:37 +0000 UTC to 2026-02-28 12:06:38 +0000 UTC (now=2026-01-29 12:06:54.212317535 +0000 UTC))\\\\\\\"\\\\nF0129 12:06:54.212498 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e02d977e34888952623f4ced5e073d7bf1ff360f8deb435dc577d2ecf146cb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:33Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:57Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.090988 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:57Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.109140 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:57Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.127616 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966eb8e59f82bacf20ebfa1b1d5bf9dc0a9cdb3b430ae83ed9bde106eb3fa521\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea7c92ec02a9cf6ec5fbd8efe22a066dfef9b48f042137964c5dd49578c99079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imag
eID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:57Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.130321 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.130369 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.130381 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.130400 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.130413 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:57Z","lastTransitionTime":"2026-01-29T12:07:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.142712 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0310995-a7c7-47c3-ae6c-05daaaba92a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e56ac830346c0f85227bcebc9b76c2af8646767d4678934985d7b1f5846dcb82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8391a68625fc3d8fdbc4f16b0b7b25e359798d08ce65ad8f482722910873418a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7c24x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:57Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.157261 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rnbz9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b372210b-6e1b-4a80-b379-7c1d570712f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://346c36936603f4535d5ad3e6b60e451b05b4d376e2531c813f0c900c92caf5fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqzp6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\
\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rnbz9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:57Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.172273 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9dtck" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"132de771-c8b4-403f-8916-8b453e7c6fc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30aac9e31bfe76eba96a50a30b03419b0b23ddd888485d609f37b84af28b8d3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gvjpd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://829af7b1f520503a0f36c60dd12b25d45c3831ba0cbb61f4f0a80582cb46da1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"
},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gvjpd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-9dtck\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:57Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.187792 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b108c546-a8c1-4bf9-814b-bfb5871b3013\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9842d9d6fed8c87bc20835964d3271b0be3117fedc658e6ef028db422d1ad478\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4fd4169036d3a646c9e5e4bfa8d86a92c557c3d8f77608ae5308dd0b8c45517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\"
:\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b4a6bbb431e321e54013c96e3cca1537283cbdf65d89a978780550d603663fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://31bd7db7bddd3e16cb455f7af9fd3a9bca49919c8f1ffb676dec83c543ba7763\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:57Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.233307 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.233360 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.233369 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.233388 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.233400 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:57Z","lastTransitionTime":"2026-01-29T12:07:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in 
/etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.336154 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.336203 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.336214 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.336269 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.336294 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:57Z","lastTransitionTime":"2026-01-29T12:07:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.439835 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.439949 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.439970 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.440033 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.440057 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:57Z","lastTransitionTime":"2026-01-29T12:07:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.544056 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.544372 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.544406 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.544438 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.544472 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:57Z","lastTransitionTime":"2026-01-29T12:07:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.647216 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.647284 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.647297 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.647319 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.647333 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:57Z","lastTransitionTime":"2026-01-29T12:07:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.659441 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-21 21:29:16.410788669 +0000 UTC
Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.750166 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.750213 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.750238 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.750257 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.750272 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:57Z","lastTransitionTime":"2026-01-29T12:07:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.853655 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.853696 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.853708 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.853731 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.853744 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:57Z","lastTransitionTime":"2026-01-29T12:07:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.888523 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 29 12:07:57 crc kubenswrapper[4753]: E0129 12:07:57.888715 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.907130 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0310995-a7c7-47c3-ae6c-05daaaba92a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e56ac830346c0f85227bcebc9b76c2af8646767d4678934985d7b1f5846dcb82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8391a68625fc3d8fdbc4f16b0b7b25e359798d08ce65ad8f482722910873418a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7c24x\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:57Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.926950 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rnbz9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b372210b-6e1b-4a80-b379-7c1d570712f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://346c36936603f4535d5ad3e6b60e451b05b4d376e2531c813f0c900c92caf5fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access
-gqzp6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rnbz9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:57Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.943343 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9dtck" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"132de771-c8b4-403f-8916-8b453e7c6fc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30aac9e31bfe76eba96a50a30b03419b0b23ddd888485d609f37b84af28b8d3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gvjpd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://829af7b1f520503a0f36c60dd12b25d45c3831ba0cbb61f4f0a80582cb46da1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-confi
g\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gvjpd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-9dtck\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:57Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.956358 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.956409 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.956419 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.956440 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.956452 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:57Z","lastTransitionTime":"2026-01-29T12:07:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.961797 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b108c546-a8c1-4bf9-814b-bfb5871b3013\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9842d9d6fed8c87bc20835964d3271b0be3117fedc658e6ef028db422d1ad478\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4fd4169036d3a646c9e5e4bfa8d86a92c557c3d8f77608ae5308dd0b8c45517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b4a6bbb431e321e54013c96e3cca1537283cbdf65d89a978780550d603663fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://31bd7db7bddd3e16cb455f7af9fd3a9bca49919c8f1ffb676dec83c543ba7763\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:57Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.979725 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:57Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:57 crc kubenswrapper[4753]: I0129 12:07:57.995567 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:57Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.014060 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966eb8e59f82bacf20ebfa1b1d5bf9dc0a9cdb3b430ae83ed9bde106eb3fa521\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea7c92ec02a9cf6ec5fbd8efe22a066dfef9b48f042137964c5dd49578c99079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:58Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.036748 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d7ccf10988fd83d31d42be6f82403067a0cc50313c4d5dcddfb528507efffa4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:58Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.060119 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.060170 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.060185 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:58 crc 
kubenswrapper[4753]: I0129 12:07:58.060207 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.060238 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:58Z","lastTransitionTime":"2026-01-29T12:07:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.062271 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80bec2ab-0a88-4818-9339-760edda3b07e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f15016e29f1fc14f51b4d01e772071333fd8a885cacc1804b3a03dbcfc93836\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e6a7d8df812546f17b3e026005c5062086e97e3fa364b3fa22cefec025e4eba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f71859413676dab4a435d43c11ad7cba315a12de744c3c8b161273f6ad36580c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0ec2a21ec299b5eed363668f39a270a8b03ea79b9de3dd23bda0c30579c3de3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91c1df8b6763320498420463a3841f0bfa4fcf753c355f549c3c1e93bf5206a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://05d1e99596530f467ecdae51aec8db191ac045cd85e121fc9835e2f3688f6ae7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7138397dae5e90b30b0cdec02d29c108d423b3c
94b07ea63e1560c4f02ed607\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e7138397dae5e90b30b0cdec02d29c108d423b3c94b07ea63e1560c4f02ed607\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T12:07:43Z\\\",\\\"message\\\":\\\"rnalversions/factory.go:140\\\\nI0129 12:07:43.077687 6477 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 12:07:43.077875 6477 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0129 12:07:43.078075 6477 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 12:07:43.078370 6477 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 12:07:43.078709 6477 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0129 12:07:43.078725 6477 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0129 12:07:43.078784 6477 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0129 12:07:43.078798 6477 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0129 12:07:43.078806 6477 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0129 12:07:43.078847 6477 factory.go:656] Stopping watch factory\\\\nI0129 12:07:43.078862 6477 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0129 12:07:43.078866 6477 ovnkube.go:599] Stopped ovnkube\\\\nI0129 12:07:43.078876 6477 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0129 12\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:41Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-nzkvz_openshift-ovn-kubernetes(80bec2ab-0a88-4818-9339-760edda3b07e)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e40e5b13dfdc135ea09253d466bacdfc5c2ac3d28eabc76d833d71ac300905c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-nzkvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:58Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.079834 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c93f64b-e5af-4d6b-83e9-7fdfeb8548ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a9ec3e416b3b385c71b2fa28c0215fcb04b708e33fec2d85fdf0a75b848b027\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4fd3a920ea0c98edb2c8ad37d2a1a5d1ed50de5bc8c3e2f4d34c876b2cab54f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c
97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8dea62586ba5ac5a8ddc9e75b216a27593c8fe99516abc38c9c411a93842a372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://44c03bba5a02d879c3b235cd65897fb30651871689c8f02b9526966817f79636\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44c03bba5a02d879c3b235cd65897fb30651871689c8f02b9526966817f79636\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:58Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.095469 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7962d88e183df5c4d91f5767f2007068ea2fdfd5c31d9d6ed609c2ab9ca26999\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:58Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.109326 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5m2jf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0203345d-1e9f-4cfe-bde7-90f87221d1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fdf3d79084d3e17b1f8cb0b285fd425280f34724c6a441104a2ec52b5217bfdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zzwn6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5m2jf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:58Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.127783 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-p6m5g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2412fa08-e643-4225-b494-eb999ea93fce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjjxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjjxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:23Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-p6m5g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:58Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.143591 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"acf2e7cc-e85d-43b0-84c4-7d2de575457f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://308d58d7f5a84b4d1980716b231abfe2ea2e9355bff9f5c998584d6986a9a389\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa000ea086031e13fdded4648ef18460b2192b270143a3866a3f448d5a006eb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa000ea086031e13fdded4648ef18460b2192b270143a3866a3f448d5a006eb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:58Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.162321 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"703a6b9d-a15d-4ed3-b00b-db7bd5d42c61\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cab3ab80a8fddbc591cd07fdbed800f525bb8a1b5505bf818ebb59cbcce73003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7af8be0836a8ff7d08ff48d9487acdedd4b83127538d071d13b3215400c0f8de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d57a9cede640bdcec325175598b72dd00398436b730248728c7d9afd545b1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e86597c91848f265cd70410d57ac60eadba1d0322b9b897097366a41c93d8134\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T12:06:54Z\\\",\\\"message\\\":\\\"ed a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\"\\\\nI0129 12:06:54.190978 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 12:06:54.198044 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 12:06:54.198078 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 12:06:54.198139 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 12:06:54.198147 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 12:06:54.208089 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 12:06:54.208122 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208128 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208132 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 12:06:54.208135 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 12:06:54.208139 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 12:06:54.208143 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 12:06:54.208164 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0129 12:06:54.212380 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769688397\\\\\\\\\\\\\\\" (2026-01-29 12:06:37 +0000 UTC to 2026-02-28 12:06:38 +0000 UTC (now=2026-01-29 12:06:54.212317535 +0000 UTC))\\\\\\\"\\\\nF0129 12:06:54.212498 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e02d977e34888952623f4ced5e073d7bf1ff360f8deb435dc577d2ecf146cb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:33Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:58Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.164206 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.164304 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.164338 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.164359 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.164372 4753 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:58Z","lastTransitionTime":"2026-01-29T12:07:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.177721 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:58Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.192693 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x8kzr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d753320a-61c2-4c0e-bd48-96d74b352114\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c7a667b7243fc36ff18cb4c017152fff3d5f4eb5e28e306ace44420cd722c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tgns4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x8kzr\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:58Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.212936 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vwcjk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e921d361518102483679b5ff7387e27858e315d637efb0f18e06215fe00f70a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8b1c7640bd4881ba79f3471e309b2d1538aea8d0c8e01d3d7cfc3677d95725a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8b1c7640bd4881ba79f3471e309b2d1538aea8d0c8e01d3d7cfc3677d95725a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0ba51b6e2180da460faa3599850175db9d7410880ba0214425fa275803f36d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0ba51b6e2180da460faa3599850175db9d7410880ba0214425fa275803f36d3\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2026-01-29T12:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ed771b481178d6e12c669e122932ef25e1fe09db24c8f025f50a469400af184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ed771b481178d6e12c669e122932ef25e1fe09db24c8f025f50a469400af184\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1990423fefacfb9b390af88b9af343c1bb4c02014db6bf8226918a7be9f4ad5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1990423fefacfb9b390af88b9af343c1bb4c02014db6bf8226918a7be9f4ad5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vwcjk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-01-29T12:07:58Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.268530 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.268594 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.268609 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.268629 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.268643 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:58Z","lastTransitionTime":"2026-01-29T12:07:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.372504 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.372550 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.372562 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.372581 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.372593 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:58Z","lastTransitionTime":"2026-01-29T12:07:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.475773 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.475858 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.475874 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.475906 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.475923 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:58Z","lastTransitionTime":"2026-01-29T12:07:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.578899 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.578943 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.578953 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.578981 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.578991 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:58Z","lastTransitionTime":"2026-01-29T12:07:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.660324 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-16 05:28:50.146900537 +0000 UTC Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.681576 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.681620 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.681637 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.681659 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.681669 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:58Z","lastTransitionTime":"2026-01-29T12:07:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.784935 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.785044 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.785059 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.785081 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.785094 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:58Z","lastTransitionTime":"2026-01-29T12:07:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.887541 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.887564 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p6m5g" Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.887693 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 12:07:58 crc kubenswrapper[4753]: E0129 12:07:58.887744 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 12:07:58 crc kubenswrapper[4753]: E0129 12:07:58.887886 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 12:07:58 crc kubenswrapper[4753]: E0129 12:07:58.887944 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p6m5g" podUID="2412fa08-e643-4225-b494-eb999ea93fce" Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.888854 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.888894 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.888928 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.888943 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.888954 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:58Z","lastTransitionTime":"2026-01-29T12:07:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.988903 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:07:58 crc kubenswrapper[4753]: E0129 12:07:58.989250 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:09:02.989203329 +0000 UTC m=+157.241284784 (durationBeforeRetry 1m4s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.990694 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.990744 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.990755 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.990774 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:58 crc kubenswrapper[4753]: I0129 12:07:58.990787 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:58Z","lastTransitionTime":"2026-01-29T12:07:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.093597 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.093673 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.093687 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.093710 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.093724 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:59Z","lastTransitionTime":"2026-01-29T12:07:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.196306 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.196341 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.196352 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.196366 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.196375 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:59Z","lastTransitionTime":"2026-01-29T12:07:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.299108 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.299159 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.299181 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.299204 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.299217 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:59Z","lastTransitionTime":"2026-01-29T12:07:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 29 12:07:59 crc kubenswrapper[4753]: E0129 12:07:59.338000 4753 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb372210b_6e1b_4a80_b379_7c1d570712f3.slice/crio-346c36936603f4535d5ad3e6b60e451b05b4d376e2531c813f0c900c92caf5fd.scope\": RecentStats: unable to find data in memory cache]"
Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.393345 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.393440 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 29 12:07:59 crc kubenswrapper[4753]: E0129 12:07:59.393583 4753 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 29 12:07:59 crc kubenswrapper[4753]: E0129 12:07:59.393613 4753 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Jan 29 12:07:59 crc kubenswrapper[4753]: E0129 12:07:59.393695 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 12:09:03.393674785 +0000 UTC m=+157.645756230 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 29 12:07:59 crc kubenswrapper[4753]: E0129 12:07:59.393714 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 12:09:03.393707266 +0000 UTC m=+157.645788721 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.402709 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.402776 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.402790 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.402808 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.402821 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:59Z","lastTransitionTime":"2026-01-29T12:07:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.505953 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.506015 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.506031 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.506052 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.506066 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:59Z","lastTransitionTime":"2026-01-29T12:07:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.609103 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.609178 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.609189 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.609207 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.609244 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:59Z","lastTransitionTime":"2026-01-29T12:07:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.661258 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-15 13:33:20.211570444 +0000 UTC
Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.711981 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.712023 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.712032 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.712048 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.712058 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:59Z","lastTransitionTime":"2026-01-29T12:07:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.731383 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rnbz9_b372210b-6e1b-4a80-b379-7c1d570712f3/kube-multus/0.log" Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.731428 4753 generic.go:334] "Generic (PLEG): container finished" podID="b372210b-6e1b-4a80-b379-7c1d570712f3" containerID="346c36936603f4535d5ad3e6b60e451b05b4d376e2531c813f0c900c92caf5fd" exitCode=1 Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.731466 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-rnbz9" event={"ID":"b372210b-6e1b-4a80-b379-7c1d570712f3","Type":"ContainerDied","Data":"346c36936603f4535d5ad3e6b60e451b05b4d376e2531c813f0c900c92caf5fd"} Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.731923 4753 scope.go:117] "RemoveContainer" containerID="346c36936603f4535d5ad3e6b60e451b05b4d376e2531c813f0c900c92caf5fd" Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.751317 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x8kzr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d753320a-61c2-4c0e-bd48-96d74b352114\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c7a667b7243fc36ff18cb4c017152fff3d5f4eb5e28e306ace44420cd722c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tgns4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x8kzr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:59Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.770751 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vwcjk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e921d361518102483679b5ff7387e27858e315d637efb0f18e06215fe00f70a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"read
Only\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8b1c7640bd4881ba79f3471e309b2d1538aea8d0c8e01d3d7cfc3677d95725a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8b1c7640bd4881ba79f3471e309b2d1538aea8d0c8e01d3d7cfc3677d95725a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0ba51b6e2180da460faa3599850175db9d7410880ba0214425fa275803f36d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0ba51b6e2180da460faa3599850175db9d7410880ba0214425fa275803f36d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:14Z\\\",\\\"reason\\\":\\\"Completed
\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ed771b481178d6e12c669e122932ef25e1fe09db24c8f025f50a469400af184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ed771b481178d6e12c669e122932ef25e1fe09db24c8f025f50a469400af184\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1990423fefacfb9b390af88b9af343c1bb4c02014db6bf8226918a7be9f4ad5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1990423fefacfb9b390af88b9af343c1bb4c02014db6bf8226918a7be9f4ad5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vwcjk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:59Z is after 
2025-08-24T17:21:41Z" Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.782485 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"acf2e7cc-e85d-43b0-84c4-7d2de575457f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://308d58d7f5a84b4d1980716b231abfe2ea2e9355bff9f5c998584d6986a9a389\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa000ea086031e13fdded4648ef18460b2192b270143a3866a3f448d5a006eb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa000ea086031e13fdded4648ef18460b2192b270143a3866a3f448d5a006eb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:59Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.801447 4753 status_manager.go:875] 
"Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"703a6b9d-a15d-4ed3-b00b-db7bd5d42c61\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cab3ab80a8fddbc591cd07fdbed800f525bb8a1b5505bf818ebb59cbcce73003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7af8be0836a8ff7d08ff48d9487acdedd4b83127538d071d13b3215400c0f8de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d57a9cede640bdcec325175598b72dd00398436b730248728c7d9afd545b1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e86597c91848f265cd70410d57ac60ea
dba1d0322b9b897097366a41c93d8134\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T12:06:54Z\\\",\\\"message\\\":\\\"ed a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\"\\\\nI0129 12:06:54.190978 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 12:06:54.198044 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 12:06:54.198078 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 12:06:54.198139 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 12:06:54.198147 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 12:06:54.208089 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 12:06:54.208122 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208128 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208132 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 12:06:54.208135 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 12:06:54.208139 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 12:06:54.208143 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 12:06:54.208164 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0129 12:06:54.212380 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769688397\\\\\\\\\\\\\\\" (2026-01-29 12:06:37 +0000 UTC to 2026-02-28 12:06:38 +0000 UTC (now=2026-01-29 12:06:54.212317535 +0000 UTC))\\\\\\\"\\\\nF0129 12:06:54.212498 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e02d977e34888952623f4ced5e073d7bf1ff360f8deb435dc577d2ecf146cb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:33Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:59Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.817850 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.817915 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.817930 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.817951 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.817964 4753 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:59Z","lastTransitionTime":"2026-01-29T12:07:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.821482 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:59Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.837483 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:59Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.853832 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966eb8e59f82bacf20ebfa1b1d5bf9dc0a9cdb3b430ae83ed9bde106eb3fa521\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea7c92ec02a9cf6ec5fbd8efe22a066dfef9b48f042137964c5dd49578c99079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:59Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.872549 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0310995-a7c7-47c3-ae6c-05daaaba92a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e56ac830346c0f85227bcebc9b76c2af8646767d4678934985d7b1f5846dcb82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8391a68625fc3d8fdbc4f16b0b7b25e359798d08ce65ad8f482722910873418a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernete
s.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7c24x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:59Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.888160 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 12:07:59 crc kubenswrapper[4753]: E0129 12:07:59.888373 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.888928 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rnbz9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b372210b-6e1b-4a80-b379-7c1d570712f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:59Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://346c36936603f4535d5ad3e6b60e451b05b4d376e2531c813f0c900c92caf5fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://346c36936603f4535d5ad3e6b60e451b05b4d376e2531c813f0c900c92caf5fd\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T12:07:59Z\\\",\\\"message\\\":\\\"2026-01-29T12:07:13+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_fa04fbd8-83e0-4879-a6bb-30f3a6792b92\\\\n2026-01-29T12:07:13+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_fa04fbd8-83e0-4879-a6bb-30f3a6792b92 to /host/opt/cni/bin/\\\\n2026-01-29T12:07:14Z [verbose] multus-daemon started\\\\n2026-01-29T12:07:14Z [verbose] Readiness Indicator file check\\\\n2026-01-29T12:07:59Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqzp6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-rnbz9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:59Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.907101 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9dtck" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"132de771-c8b4-403f-8916-8b453e7c6fc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30aac9e31bfe76eba96a50a30b03419b0b23ddd888485d609f37b84af28b8d3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gvjpd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://829af7b1f520503a0f36c60dd12b25d45c3831ba0cbb61f4f0a80582cb46da1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gvjpd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}]
,\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-9dtck\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:59Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.920391 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.920457 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.920471 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.920494 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.920506 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:07:59Z","lastTransitionTime":"2026-01-29T12:07:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.925018 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b108c546-a8c1-4bf9-814b-bfb5871b3013\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9842d9d6fed8c87bc20835964d3271b0be3117fedc658e6ef028db422d1ad478\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4fd4169036d3a646c9e5e4bfa8d86a92c557c3d8f77608ae5308dd0b8c45517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b4a6bbb431e321e54013c96e3cca1537283cbdf65d89a978780550d603663fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://31bd7db7bddd3e16cb455f7af9fd3a9bca49919c8f1ffb676dec83c543ba7763\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:59Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.938894 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:59Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.953455 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d7ccf10988fd83d31d42be6f82403067a0cc50313c4d5dcddfb528507efffa4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:59Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.979674 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"80bec2ab-0a88-4818-9339-760edda3b07e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f15016e29f1fc14f51b4d01e772071333fd8a885cacc1804b3a03dbcfc93836\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e6a7d8df812546f17b3e026005c5062086e97e3fa364b3fa22cefec025e4eba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f71859413676dab4a435d43c11ad7cba315a12de744c3c8b161273f6ad36580c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0ec2a21ec299b5eed363668f39a270a8b03ea79b9de3dd23bda0c30579c3de3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91c1df8b6763320498420463a3841f0bfa4fcf753c355f549c3c1e93bf5206a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://05d1e99596530f467ecdae51aec8db191ac045cd85e121fc9835e2f3688f6ae7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7138397dae5e90b30b0cdec02d29c108d423b3c94b07ea63e1560c4f02ed607\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e7138397dae5e90b30b0cdec02d29c108d423b3c94b07ea63e1560c4f02ed607\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T12:07:43Z\\\",\\\"message\\\":\\\"rnalversions/factory.go:140\\\\nI0129 12:07:43.077687 6477 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 12:07:43.077875 6477 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0129 12:07:43.078075 6477 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 12:07:43.078370 6477 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 12:07:43.078709 6477 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0129 12:07:43.078725 6477 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0129 12:07:43.078784 6477 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0129 12:07:43.078798 6477 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0129 12:07:43.078806 6477 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0129 12:07:43.078847 6477 factory.go:656] Stopping watch factory\\\\nI0129 12:07:43.078862 6477 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0129 12:07:43.078866 6477 ovnkube.go:599] Stopped ovnkube\\\\nI0129 12:07:43.078876 6477 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0129 12\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:41Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-nzkvz_openshift-ovn-kubernetes(80bec2ab-0a88-4818-9339-760edda3b07e)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e40e5b13dfdc135ea09253d466bacdfc5c2ac3d28eabc76d833d71ac300905c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-nzkvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:59Z is after 2025-08-24T17:21:41Z" Jan 29 12:07:59 crc kubenswrapper[4753]: I0129 12:07:59.991406 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-p6m5g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2412fa08-e643-4225-b494-eb999ea93fce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjjxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjjxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:23Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-p6m5g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:07:59Z is after 2025-08-24T17:21:41Z" Jan 29 12:08:00 crc kubenswrapper[4753]: I0129 12:08:00.031250 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:00 crc kubenswrapper[4753]: I0129 12:08:00.031288 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:00 crc kubenswrapper[4753]: I0129 12:08:00.031300 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:00 crc kubenswrapper[4753]: I0129 12:08:00.031317 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:00 crc kubenswrapper[4753]: I0129 12:08:00.031327 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:00Z","lastTransitionTime":"2026-01-29T12:08:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:08:00 crc kubenswrapper[4753]: I0129 12:08:00.033077 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c93f64b-e5af-4d6b-83e9-7fdfeb8548ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a9ec3e416b3b385c71b2fa28c0215fcb04b708e33fec2d85fdf0a75b848b027\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4fd3a920ea0c98edb2c8ad37d2a1a5d1ed50de5bc8c3e2f4d34c876b2cab54f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8dea62586ba5ac5a8ddc9e75b216a27593c8fe99516abc38c9c411a93842a372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://44c03bba5a02d879c3b235cd65897fb30651871689c8f02b9526966817f79636\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44c03bba5a02d879c3b235cd65897fb30651871689c8f02b9526966817f79636\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:08:00Z is after 2025-08-24T17:21:41Z" Jan 29 12:08:00 crc kubenswrapper[4753]: I0129 12:08:00.047212 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7962d88e183df5c4d91f5767f2007068ea2fdfd5c31d9d6ed609c2ab9ca26999\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:08:00Z is after 2025-08-24T17:21:41Z" Jan 29 12:08:00 crc kubenswrapper[4753]: I0129 12:08:00.060747 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5m2jf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0203345d-1e9f-4cfe-bde7-90f87221d1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fdf3d79084d3e17b1f8cb0b285fd425280f34724c6a441104a2ec52b5217bfdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zzwn6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5m2jf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:08:00Z is after 2025-08-24T17:21:41Z" Jan 29 12:08:00 crc kubenswrapper[4753]: I0129 12:08:00.134314 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:00 crc kubenswrapper[4753]: I0129 12:08:00.134368 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:00 crc kubenswrapper[4753]: I0129 12:08:00.134383 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:00 crc kubenswrapper[4753]: I0129 12:08:00.134401 4753 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeNotReady" Jan 29 12:08:00 crc kubenswrapper[4753]: I0129 12:08:00.134410 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:00Z","lastTransitionTime":"2026-01-29T12:08:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:00 crc kubenswrapper[4753]: I0129 12:08:00.237784 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:00 crc kubenswrapper[4753]: I0129 12:08:00.237825 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:00 crc kubenswrapper[4753]: I0129 12:08:00.237837 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:00 crc kubenswrapper[4753]: I0129 12:08:00.237858 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:00 crc kubenswrapper[4753]: I0129 12:08:00.237872 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:00Z","lastTransitionTime":"2026-01-29T12:08:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:00 crc kubenswrapper[4753]: I0129 12:08:00.340576 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:00 crc kubenswrapper[4753]: I0129 12:08:00.340625 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:00 crc kubenswrapper[4753]: I0129 12:08:00.340638 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:00 crc kubenswrapper[4753]: I0129 12:08:00.340661 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:00 crc kubenswrapper[4753]: I0129 12:08:00.340675 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:00Z","lastTransitionTime":"2026-01-29T12:08:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:08:00 crc kubenswrapper[4753]: I0129 12:08:00.443827 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:00 crc kubenswrapper[4753]: I0129 12:08:00.443866 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:00 crc kubenswrapper[4753]: I0129 12:08:00.443878 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:00 crc kubenswrapper[4753]: I0129 12:08:00.443896 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:00 crc kubenswrapper[4753]: I0129 12:08:00.443908 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:00Z","lastTransitionTime":"2026-01-29T12:08:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:00 crc kubenswrapper[4753]: I0129 12:08:00.546092 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:00 crc kubenswrapper[4753]: I0129 12:08:00.546145 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:00 crc kubenswrapper[4753]: I0129 12:08:00.546160 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:00 crc kubenswrapper[4753]: I0129 12:08:00.546183 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:00 crc kubenswrapper[4753]: I0129 12:08:00.546197 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:00Z","lastTransitionTime":"2026-01-29T12:08:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:00 crc kubenswrapper[4753]: I0129 12:08:00.649775 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:00 crc kubenswrapper[4753]: I0129 12:08:00.649821 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:00 crc kubenswrapper[4753]: I0129 12:08:00.649832 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:00 crc kubenswrapper[4753]: I0129 12:08:00.649851 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:00 crc kubenswrapper[4753]: I0129 12:08:00.649863 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:00Z","lastTransitionTime":"2026-01-29T12:08:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:08:00 crc kubenswrapper[4753]: I0129 12:08:00.661712 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-02 06:53:27.026472599 +0000 UTC Jan 29 12:08:00 crc kubenswrapper[4753]: I0129 12:08:00.916036 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p6m5g" Jan 29 12:08:00 crc kubenswrapper[4753]: E0129 12:08:00.916312 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p6m5g" podUID="2412fa08-e643-4225-b494-eb999ea93fce" Jan 29 12:08:00 crc kubenswrapper[4753]: I0129 12:08:00.916419 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 12:08:00 crc kubenswrapper[4753]: E0129 12:08:00.916491 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 12:08:00 crc kubenswrapper[4753]: I0129 12:08:00.916551 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 12:08:00 crc kubenswrapper[4753]: E0129 12:08:00.916623 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 12:08:00 crc kubenswrapper[4753]: I0129 12:08:00.917737 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 12:08:00 crc kubenswrapper[4753]: I0129 12:08:00.917930 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 12:08:00 crc kubenswrapper[4753]: E0129 12:08:00.918128 4753 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 29 12:08:00 crc kubenswrapper[4753]: E0129 12:08:00.918178 4753 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 29 12:08:00 crc kubenswrapper[4753]: E0129 12:08:00.918199 4753 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 12:08:00 crc kubenswrapper[4753]: E0129 12:08:00.918251 4753 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 29 12:08:00 crc kubenswrapper[4753]: E0129 12:08:00.918293 4753 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 29 12:08:00 crc kubenswrapper[4753]: E0129 12:08:00.918311 4753 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 12:08:00 crc kubenswrapper[4753]: E0129 12:08:00.918312 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-29 12:09:04.918290674 +0000 UTC m=+159.170372289 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 12:08:00 crc kubenswrapper[4753]: E0129 12:08:00.918403 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-29 12:09:04.918379426 +0000 UTC m=+159.170461061 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 12:08:00 crc kubenswrapper[4753]: I0129 12:08:00.919859 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:00 crc kubenswrapper[4753]: I0129 12:08:00.919898 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:00 crc kubenswrapper[4753]: I0129 12:08:00.919911 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:00 crc kubenswrapper[4753]: I0129 12:08:00.919929 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:00 crc kubenswrapper[4753]: I0129 12:08:00.919940 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:00Z","lastTransitionTime":"2026-01-29T12:08:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:08:00 crc kubenswrapper[4753]: I0129 12:08:00.928582 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rnbz9_b372210b-6e1b-4a80-b379-7c1d570712f3/kube-multus/0.log" Jan 29 12:08:00 crc kubenswrapper[4753]: I0129 12:08:00.928659 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-rnbz9" event={"ID":"b372210b-6e1b-4a80-b379-7c1d570712f3","Type":"ContainerStarted","Data":"32065049674d4bf4bc624652e7d77fa28f08e40f89d5dd3bf75f0bc7038d35bf"} Jan 29 12:08:00 crc kubenswrapper[4753]: I0129 12:08:00.950415 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x8kzr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d753320a-61c2-4c0e-bd48-96d74b352114\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c7a667b7243fc36ff18cb4c017152fff3d5f4eb5e28e306ace44420cd722c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tgns4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x8kzr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:08:00Z is after 2025-08-24T17:21:41Z" Jan 29 12:08:00 crc kubenswrapper[4753]: I0129 12:08:00.985804 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vwcjk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a99ab890-0a5a-4abb-86fb-a3731ff6b2c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e921d361518102483679b5ff7387e27858e315d637efb0f18e06215fe00f70a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd5300a3195f0f4d66a2789abcd65b4ade5652b5d05eb08bfd29932b97137464\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d95b548f1e6e754ff4a6f8d68364586d68b03662d0a679197fd0c99c41745691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8b1c7640bd4881ba79f3471e309b2d1538aea8d0c8e01d3d7cfc3677d95725a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8b1c7640bd4881ba79f3471e309b2d1538aea8d0c8e01d3d7cfc3677d95725a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0ba51b6e2180da460faa3599850175db9d7410880ba0214425fa275803f36d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0ba51b6e2180da460faa3599850175db9d7410880ba0214425fa275803f36d3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ed771b481178d6e12c669e122932ef25e1fe09db24c8f025f50a469400af184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ed771b481178d6e12c669e122932ef25e1fe09db24c8f025f50a469400af184\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1990423fefacfb9b390af88b9af343c1bb4c02014db6bf8226918a7be9f4ad5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1990423fefacfb9b390af88b9af343c1bb4c02014db6bf8226918a7be9f4ad5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t6f7b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vwcjk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:08:00Z is after 2025-08-24T17:21:41Z" Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.000524 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"acf2e7cc-e85d-43b0-84c4-7d2de575457f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://308d58d7f5a84b4d1980716b231abfe2ea2e9355bff9f5c998584d6986a9a389\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa000ea086031e13fdded4648ef18460b2192b270143a3866a3f448d5a006eb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa000ea086031e13fdded4648ef18460b2192b270143a3866a3f448d5a006eb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:08:00Z is after 2025-08-24T17:21:41Z" Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.020007 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"703a6b9d-a15d-4ed3-b00b-db7bd5d42c61\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cab3ab80a8fddbc591cd07fdbed800f525bb8a1b5505bf818ebb59cbcce73003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7af8be0836a8ff7d08ff48d9487acdedd4b83127538d071d13b3215400c0f8de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d57a9cede640bdcec325175598b72dd00398436b730248728c7d9afd545b1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e86597c91848f265cd70410d57ac60eadba1d0322b9b897097366a41c93d8134\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T12:06:54Z\\\",\\\"message\\\":\\\"ed a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\"\\\\nI0129 12:06:54.190978 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 12:06:54.198044 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 12:06:54.198078 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 12:06:54.198139 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 12:06:54.198147 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 12:06:54.208089 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 12:06:54.208122 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208128 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 12:06:54.208132 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 12:06:54.208135 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 12:06:54.208139 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 12:06:54.208143 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 12:06:54.208164 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0129 12:06:54.212380 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-3028279136/tls.crt::/tmp/serving-cert-3028279136/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1769688397\\\\\\\\\\\\\\\" (2026-01-29 12:06:37 +0000 UTC to 2026-02-28 12:06:38 +0000 UTC (now=2026-01-29 12:06:54.212317535 +0000 UTC))\\\\\\\"\\\\nF0129 12:06:54.212498 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e02d977e34888952623f4ced5e073d7bf1ff360f8deb435dc577d2ecf146cb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:33Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:08:01Z is after 2025-08-24T17:21:41Z" Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.022667 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.022813 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.022846 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.022869 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.022880 4753 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:01Z","lastTransitionTime":"2026-01-29T12:08:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.039302 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:08:01Z is after 2025-08-24T17:21:41Z" Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.057420 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:08:01Z is after 2025-08-24T17:21:41Z" Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.089991 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966eb8e59f82bacf20ebfa1b1d5bf9dc0a9cdb3b430ae83ed9bde106eb3fa521\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea7c92ec02a9cf6ec5fbd8efe22a066dfef9b48f042137964c5dd49578c99079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:08:01Z is after 2025-08-24T17:21:41Z" Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.114126 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0310995-a7c7-47c3-ae6c-05daaaba92a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e56ac830346c0f85227bcebc9b76c2af8646767d4678934985d7b1f5846dcb82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8391a68625fc3d8fdbc4f16b0b7b25e359798d08ce65ad8f482722910873418a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernete
s.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-txdmk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-7c24x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:08:01Z is after 2025-08-24T17:21:41Z" Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.125744 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.125786 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.125796 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.125811 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.125821 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:01Z","lastTransitionTime":"2026-01-29T12:08:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.139854 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rnbz9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b372210b-6e1b-4a80-b379-7c1d570712f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:08:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:08:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32065049674d4bf4bc624652e7d77fa28f08e40f89d5dd3bf75f0bc7038d35bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://346c36936603f4535d5ad3e6b60e451b05b4d376e2531c813f0c900c92caf5fd\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T12:07:59Z\\\",\\\"message\\\":\\\"2026-01-29T12:07:13+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_fa04fbd8-83e0-4879-a6bb-30f3a6792b92\\\\n2026-01-29T12:07:13+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_fa04fbd8-83e0-4879-a6bb-30f3a6792b92 to /host/opt/cni/bin/\\\\n2026-01-29T12:07:14Z [verbose] multus-daemon started\\\\n2026-01-29T12:07:14Z [verbose] Readiness Indicator file check\\\\n2026-01-29T12:07:59Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqzp6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:59Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rnbz9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:08:01Z is after 2025-08-24T17:21:41Z" Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.228290 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.228333 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.228357 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.228375 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.228385 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:01Z","lastTransitionTime":"2026-01-29T12:08:01Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.230441 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9dtck" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"132de771-c8b4-403f-8916-8b453e7c6fc3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30aac9e31bfe76eba96a50a30b03419b0b23ddd888485d609f37b84af28b8d3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gvjpd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://829af7b1f520503a0f36c60dd12b25d45c3831ba0cbb61f4f0a80582cb46da1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gvjpd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-
01-29T12:07:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-9dtck\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:08:01Z is after 2025-08-24T17:21:41Z" Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.244621 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b108c546-a8c1-4bf9-814b-bfb5871b3013\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9842d9d6fed8c87bc20835964d3271b0be3117fedc658e6ef028db422d1ad478\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4fd4169036d3a646c9e5e4bfa8d86a92c557c3d8f77608ae5308dd0b8c45517\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b4a6bbb431e321e54013c96e3cca1537283cbdf65d89a978780550d603663fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed0
8287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://31bd7db7bddd3e16cb455f7af9fd3a9bca49919c8f1ffb676dec83c543ba7763\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:08:01Z is after 2025-08-24T17:21:41Z" Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.259573 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:55Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:08:01Z is after 2025-08-24T17:21:41Z" Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.276030 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d7ccf10988fd83d31d42be6f82403067a0cc50313c4d5dcddfb528507efffa4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:08:01Z is after 2025-08-24T17:21:41Z" Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.312947 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"80bec2ab-0a88-4818-9339-760edda3b07e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f15016e29f1fc14f51b4d01e772071333fd8a885cacc1804b3a03dbcfc93836\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e6a7d8df812546f17b3e026005c5062086e97e3fa364b3fa22cefec025e4eba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f71859413676dab4a435d43c11ad7cba315a12de744c3c8b161273f6ad36580c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0ec2a21ec299b5eed363668f39a270a8b03ea79b9de3dd23bda0c30579c3de3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91c1df8b6763320498420463a3841f0bfa4fcf753c355f549c3c1e93bf5206a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://05d1e99596530f467ecdae51aec8db191ac045cd85e121fc9835e2f3688f6ae7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7138397dae5e90b30b0cdec02d29c108d423b3c94b07ea63e1560c4f02ed607\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e7138397dae5e90b30b0cdec02d29c108d423b3c94b07ea63e1560c4f02ed607\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T12:07:43Z\\\",\\\"message\\\":\\\"rnalversions/factory.go:140\\\\nI0129 12:07:43.077687 6477 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 12:07:43.077875 6477 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0129 12:07:43.078075 6477 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 12:07:43.078370 6477 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0129 12:07:43.078709 6477 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0129 12:07:43.078725 6477 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0129 12:07:43.078784 6477 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0129 12:07:43.078798 6477 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0129 12:07:43.078806 6477 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0129 12:07:43.078847 6477 factory.go:656] Stopping watch factory\\\\nI0129 12:07:43.078862 6477 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0129 12:07:43.078866 6477 ovnkube.go:599] Stopped ovnkube\\\\nI0129 12:07:43.078876 6477 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0129 12\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:41Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-nzkvz_openshift-ovn-kubernetes(80bec2ab-0a88-4818-9339-760edda3b07e)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e40e5b13dfdc135ea09253d466bacdfc5c2ac3d28eabc76d833d71ac300905c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:07:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:07:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nkh6d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-nzkvz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:08:01Z is after 2025-08-24T17:21:41Z" Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.331549 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.331665 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.331707 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.331754 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.331769 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:01Z","lastTransitionTime":"2026-01-29T12:08:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.344714 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-p6m5g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2412fa08-e643-4225-b494-eb999ea93fce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjjxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjjxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:07:23Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-p6m5g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:08:01Z is after 2025-08-24T17:21:41Z" Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.367065 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c93f64b-e5af-4d6b-83e9-7fdfeb8548ec\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a9ec3e416b3b385c71b2fa28c0215fcb04b708e33fec2d85fdf0a75b848b027\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4fd3a920ea0c98edb2c8ad37d2a1a5d1ed50de5bc8c3e2f4d34c876b2cab54f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8dea62586ba5ac5a8ddc9e75b216a27593c8fe99516abc38c9c411a93842a372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://44c03bba5a02d879c3b235cd65897fb30651871689c8f02b9526966817f79636\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://44c03bba5a02d879c3b235cd65897fb30651871689c8f02b9526966817f79636\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T12:06:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T12:06:29Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:28Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:08:01Z is after 2025-08-24T17:21:41Z"
Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.386407 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7962d88e183df5c4d91f5767f2007068ea2fdfd5c31d9d6ed609c2ab9ca26999\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:06:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:08:01Z is after 2025-08-24T17:21:41Z"
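The status-update failures in this stretch all bottom out in the same TLS error: the network-node-identity webhook on 127.0.0.1:9743 presents a serving certificate whose NotAfter is 2025-08-24T17:21:41Z, while the node clock reads 2026-01-29. The error wording is Go's standard x509 validity check; below is a minimal, standalone sketch of the same NotBefore/NotAfter comparison (illustrative helper, not kubelet or webhook source):

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

// checkValidity mirrors the NotBefore/NotAfter comparison behind the
// "x509: certificate has expired or is not yet valid" error in this log.
func checkValidity(pemBytes []byte, now time.Time) error {
	block, _ := pem.Decode(pemBytes)
	if block == nil {
		return fmt.Errorf("no PEM block found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		return err
	}
	if now.Before(cert.NotBefore) {
		return fmt.Errorf("certificate not yet valid: current time %s is before %s",
			now.Format(time.RFC3339), cert.NotBefore.Format(time.RFC3339))
	}
	if now.After(cert.NotAfter) {
		return fmt.Errorf("certificate has expired: current time %s is after %s",
			now.Format(time.RFC3339), cert.NotAfter.Format(time.RFC3339))
	}
	return nil
}

func main() {
	if len(os.Args) < 2 {
		fmt.Fprintln(os.Stderr, "usage: certcheck <serving-cert.pem>")
		os.Exit(2)
	}
	pemBytes, err := os.ReadFile(os.Args[1]) // path to the serving certificate PEM
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	if err := checkValidity(pemBytes, time.Now()); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("certificate is inside its validity window")
}

Run against the webhook's serving certificate, this reproduces the "has expired" branch seen in every failed patch above; until that certificate is rotated, no pod status update on this node can get through the webhook.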
Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.410593 4753 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5m2jf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0203345d-1e9f-4cfe-bde7-90f87221d1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:06:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T12:07:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fdf3d79084d3e17b1f8cb0b285fd425280f34724c6a441104a2ec52b5217bfdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T12:07:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zzwn6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T12:06:58Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5m2jf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:08:01Z is after 2025-08-24T17:21:41Z"
Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.435349 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.435408 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.435433 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.435475 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.435502 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:01Z","lastTransitionTime":"2026-01-29T12:08:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.540146 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.540193 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.540210 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.540460 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.540484 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:01Z","lastTransitionTime":"2026-01-29T12:08:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.645272 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.645338 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.645354 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.645378 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.645398 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:01Z","lastTransitionTime":"2026-01-29T12:08:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
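Each of the "Node became not ready" records above carries the same NetworkPluginNotReady reason: nothing exists under /etc/kubernetes/cni/net.d/ yet, because ovnkube-controller is crash-looping before it can install its CNI configuration. The readiness condition amounts to a directory scan; a rough sketch follows, assuming (borrowing libcni's conventions) that .conf, .conflist, and .json files count as network configurations:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// hasCNIConfig reports whether confDir holds at least one CNI network config.
// The extension set is an assumption taken from libcni conventions, not
// quoted from kubelet source.
func hasCNIConfig(confDir string) (bool, error) {
	entries, err := os.ReadDir(confDir)
	if err != nil {
		return false, err
	}
	for _, e := range entries {
		if e.IsDir() {
			continue
		}
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			return true, nil
		}
	}
	return false, nil
}

func main() {
	ok, err := hasCNIConfig("/etc/kubernetes/cni/net.d")
	if err != nil || !ok {
		// Matches the condition the kubelet keeps reporting in this log.
		fmt.Println("container runtime network not ready: no CNI configuration file")
		return
	}
	fmt.Println("CNI configuration present; NetworkReady can flip to true")
}

Once ovnkube-controller stays up long enough to write its config file, a scan like this succeeds and the Ready condition stops flapping.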
Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.662919 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-02 12:02:19.39466794 +0000 UTC
Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.749016 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.749088 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.749101 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.749121 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.749135 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:01Z","lastTransitionTime":"2026-01-29T12:08:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.853628 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.853715 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.853736 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.853766 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.853785 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:01Z","lastTransitionTime":"2026-01-29T12:08:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.888113 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 29 12:08:01 crc kubenswrapper[4753]: E0129 12:08:01.888438 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.957262 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.957319 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.957344 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.957378 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:01 crc kubenswrapper[4753]: I0129 12:08:01.957394 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:01Z","lastTransitionTime":"2026-01-29T12:08:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:02 crc kubenswrapper[4753]: I0129 12:08:02.060414 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:02 crc kubenswrapper[4753]: I0129 12:08:02.060464 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:02 crc kubenswrapper[4753]: I0129 12:08:02.060480 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:02 crc kubenswrapper[4753]: I0129 12:08:02.060501 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:02 crc kubenswrapper[4753]: I0129 12:08:02.060513 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:02Z","lastTransitionTime":"2026-01-29T12:08:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:08:02 crc kubenswrapper[4753]: I0129 12:08:02.163680 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:02 crc kubenswrapper[4753]: I0129 12:08:02.163740 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:02 crc kubenswrapper[4753]: I0129 12:08:02.163756 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:02 crc kubenswrapper[4753]: I0129 12:08:02.163781 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:02 crc kubenswrapper[4753]: I0129 12:08:02.163794 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:02Z","lastTransitionTime":"2026-01-29T12:08:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:02 crc kubenswrapper[4753]: I0129 12:08:02.266495 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:02 crc kubenswrapper[4753]: I0129 12:08:02.266549 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:02 crc kubenswrapper[4753]: I0129 12:08:02.266561 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:02 crc kubenswrapper[4753]: I0129 12:08:02.266582 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:02 crc kubenswrapper[4753]: I0129 12:08:02.266595 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:02Z","lastTransitionTime":"2026-01-29T12:08:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:02 crc kubenswrapper[4753]: I0129 12:08:02.369618 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:02 crc kubenswrapper[4753]: I0129 12:08:02.369721 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:02 crc kubenswrapper[4753]: I0129 12:08:02.369743 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:02 crc kubenswrapper[4753]: I0129 12:08:02.369767 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:02 crc kubenswrapper[4753]: I0129 12:08:02.369783 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:02Z","lastTransitionTime":"2026-01-29T12:08:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:08:02 crc kubenswrapper[4753]: I0129 12:08:02.472840 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:02 crc kubenswrapper[4753]: I0129 12:08:02.472900 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:02 crc kubenswrapper[4753]: I0129 12:08:02.472912 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:02 crc kubenswrapper[4753]: I0129 12:08:02.472933 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:02 crc kubenswrapper[4753]: I0129 12:08:02.472950 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:02Z","lastTransitionTime":"2026-01-29T12:08:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:02 crc kubenswrapper[4753]: I0129 12:08:02.576449 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:02 crc kubenswrapper[4753]: I0129 12:08:02.576495 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:02 crc kubenswrapper[4753]: I0129 12:08:02.576508 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:02 crc kubenswrapper[4753]: I0129 12:08:02.576526 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:02 crc kubenswrapper[4753]: I0129 12:08:02.576538 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:02Z","lastTransitionTime":"2026-01-29T12:08:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 29 12:08:02 crc kubenswrapper[4753]: I0129 12:08:02.664101 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-10 05:33:19.028800452 +0000 UTC
Jan 29 12:08:02 crc kubenswrapper[4753]: I0129 12:08:02.679126 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:08:02 crc kubenswrapper[4753]: I0129 12:08:02.679170 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:08:02 crc kubenswrapper[4753]: I0129 12:08:02.679180 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:08:02 crc kubenswrapper[4753]: I0129 12:08:02.679201 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:08:02 crc kubenswrapper[4753]: I0129 12:08:02.679211 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:02Z","lastTransitionTime":"2026-01-29T12:08:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:08:02 crc kubenswrapper[4753]: I0129 12:08:02.781845 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:08:02 crc kubenswrapper[4753]: I0129 12:08:02.781896 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:08:02 crc kubenswrapper[4753]: I0129 12:08:02.781907 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:08:02 crc kubenswrapper[4753]: I0129 12:08:02.781925 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:08:02 crc kubenswrapper[4753]: I0129 12:08:02.781938 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:02Z","lastTransitionTime":"2026-01-29T12:08:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
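Interleaved with the readiness records, certificate_manager.go reports a different kubelet-serving rotation deadline each second (2025-12-02 and 2025-11-10 above, 2025-11-13 further down). That is expected behavior: client-go's certificate manager re-rolls a jittered deadline at roughly 70-90% of the certificate's validity window on every evaluation, and because every rolled deadline here is already in the past, rotation is due immediately. A sketch of that computation (the 70-90% factor is recalled from client-go and should be treated as approximate):

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// nextRotationDeadline picks a point roughly 70-90% of the way through the
// certificate's validity window, re-rolled on every call; the factor is
// recalled from client-go's certificate manager and is approximate.
func nextRotationDeadline(notBefore, notAfter time.Time) time.Time {
	validity := notAfter.Sub(notBefore)
	jittered := time.Duration(float64(validity) * (0.7 + 0.2*rand.Float64()))
	return notBefore.Add(jittered)
}

func main() {
	// Expiry taken from the log; the one-year validity is an assumption.
	notAfter := time.Date(2026, 2, 24, 5, 53, 3, 0, time.UTC)
	notBefore := notAfter.AddDate(-1, 0, 0)
	for i := 0; i < 3; i++ {
		// Each roll lands somewhere in late 2025, which is why consecutive
		// log lines report different deadlines for the same certificate.
		fmt.Println(nextRotationDeadline(notBefore, notAfter))
	}
}

With an assumed one-year validity ending 2026-02-24, the 70-90% window spans roughly early November 2025 to mid-January 2026, which is consistent with the deadlines logged here.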
Jan 29 12:08:02 crc kubenswrapper[4753]: I0129 12:08:02.885542 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:08:02 crc kubenswrapper[4753]: I0129 12:08:02.885620 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:08:02 crc kubenswrapper[4753]: I0129 12:08:02.885639 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:08:02 crc kubenswrapper[4753]: I0129 12:08:02.885667 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:08:02 crc kubenswrapper[4753]: I0129 12:08:02.885682 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:02Z","lastTransitionTime":"2026-01-29T12:08:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:08:02 crc kubenswrapper[4753]: I0129 12:08:02.887938 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p6m5g"
Jan 29 12:08:02 crc kubenswrapper[4753]: I0129 12:08:02.887948 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 29 12:08:02 crc kubenswrapper[4753]: E0129 12:08:02.888114 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p6m5g" podUID="2412fa08-e643-4225-b494-eb999ea93fce"
Jan 29 12:08:02 crc kubenswrapper[4753]: E0129 12:08:02.888183 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 29 12:08:02 crc kubenswrapper[4753]: I0129 12:08:02.887953 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 29 12:08:02 crc kubenswrapper[4753]: E0129 12:08:02.888375 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
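It is worth noting which pods actually hit "Error syncing pod": network-check-target, network-metrics-daemon, network-check-source, and networking-console-plugin, all of which need a CNI-managed sandbox. Host-network pods such as ovnkube-node itself keep running, since the kubelet's network-readiness gate does not apply to them. A toy model of that gate, with its shape inferred from this log rather than quoted from kubelet source:

package main

import (
	"errors"
	"fmt"
)

// pod is a trimmed stand-in for the kubelet's pod object; only the field the
// readiness gate cares about is kept.
type pod struct {
	name        string
	hostNetwork bool
}

var errNetworkNotReady = errors.New(
	"network is not ready: container runtime network not ready: NetworkReady=false")

// canCreateSandbox models the gate visible in this log: pods needing a
// CNI-managed sandbox wait for NetworkReady, host-network pods do not.
// Hypothetical helper, inferred from which pods fail above.
func canCreateSandbox(p pod, networkReady bool) error {
	if networkReady || p.hostNetwork {
		return nil
	}
	return errNetworkNotReady
}

func main() {
	for _, p := range []pod{
		{name: "openshift-multus/network-metrics-daemon-p6m5g", hostNetwork: false},
		{name: "openshift-ovn-kubernetes/ovnkube-node-nzkvz", hostNetwork: true},
	} {
		if err := canCreateSandbox(p, false); err != nil {
			fmt.Printf("%s: %v\n", p.name, err)
			continue
		}
		fmt.Printf("%s: sandbox creation allowed\n", p.name)
	}
}

This exemption is what lets the network plugin itself (a host-network daemonset) come up and break the deadlock: once it writes the CNI config, the remaining pods can get sandboxes.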
Jan 29 12:08:02 crc kubenswrapper[4753]: I0129 12:08:02.989191 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:08:02 crc kubenswrapper[4753]: I0129 12:08:02.989271 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:08:02 crc kubenswrapper[4753]: I0129 12:08:02.989286 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:08:02 crc kubenswrapper[4753]: I0129 12:08:02.989306 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:08:02 crc kubenswrapper[4753]: I0129 12:08:02.989319 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:02Z","lastTransitionTime":"2026-01-29T12:08:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:08:03 crc kubenswrapper[4753]: I0129 12:08:03.092680 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:08:03 crc kubenswrapper[4753]: I0129 12:08:03.093203 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:08:03 crc kubenswrapper[4753]: I0129 12:08:03.093321 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:08:03 crc kubenswrapper[4753]: I0129 12:08:03.093406 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:08:03 crc kubenswrapper[4753]: I0129 12:08:03.093493 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:03Z","lastTransitionTime":"2026-01-29T12:08:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:08:03 crc kubenswrapper[4753]: I0129 12:08:03.196818 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:03 crc kubenswrapper[4753]: I0129 12:08:03.196889 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:03 crc kubenswrapper[4753]: I0129 12:08:03.196902 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:03 crc kubenswrapper[4753]: I0129 12:08:03.196922 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:03 crc kubenswrapper[4753]: I0129 12:08:03.196932 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:03Z","lastTransitionTime":"2026-01-29T12:08:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:03 crc kubenswrapper[4753]: I0129 12:08:03.300218 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:03 crc kubenswrapper[4753]: I0129 12:08:03.300594 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:03 crc kubenswrapper[4753]: I0129 12:08:03.300717 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:03 crc kubenswrapper[4753]: I0129 12:08:03.300828 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:03 crc kubenswrapper[4753]: I0129 12:08:03.300919 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:03Z","lastTransitionTime":"2026-01-29T12:08:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:03 crc kubenswrapper[4753]: I0129 12:08:03.403585 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:03 crc kubenswrapper[4753]: I0129 12:08:03.404006 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:03 crc kubenswrapper[4753]: I0129 12:08:03.404142 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:03 crc kubenswrapper[4753]: I0129 12:08:03.404298 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:03 crc kubenswrapper[4753]: I0129 12:08:03.404465 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:03Z","lastTransitionTime":"2026-01-29T12:08:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:08:03 crc kubenswrapper[4753]: I0129 12:08:03.508287 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:03 crc kubenswrapper[4753]: I0129 12:08:03.508356 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:03 crc kubenswrapper[4753]: I0129 12:08:03.508370 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:03 crc kubenswrapper[4753]: I0129 12:08:03.508388 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:03 crc kubenswrapper[4753]: I0129 12:08:03.508399 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:03Z","lastTransitionTime":"2026-01-29T12:08:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:03 crc kubenswrapper[4753]: I0129 12:08:03.611101 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:03 crc kubenswrapper[4753]: I0129 12:08:03.611770 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:03 crc kubenswrapper[4753]: I0129 12:08:03.611847 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:03 crc kubenswrapper[4753]: I0129 12:08:03.611923 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:03 crc kubenswrapper[4753]: I0129 12:08:03.611998 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:03Z","lastTransitionTime":"2026-01-29T12:08:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:08:03 crc kubenswrapper[4753]: I0129 12:08:03.665159 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-13 22:54:35.137418204 +0000 UTC Jan 29 12:08:03 crc kubenswrapper[4753]: I0129 12:08:03.714251 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:03 crc kubenswrapper[4753]: I0129 12:08:03.714294 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:03 crc kubenswrapper[4753]: I0129 12:08:03.714307 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:03 crc kubenswrapper[4753]: I0129 12:08:03.714327 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:03 crc kubenswrapper[4753]: I0129 12:08:03.714338 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:03Z","lastTransitionTime":"2026-01-29T12:08:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:03 crc kubenswrapper[4753]: I0129 12:08:03.816397 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:03 crc kubenswrapper[4753]: I0129 12:08:03.816431 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:03 crc kubenswrapper[4753]: I0129 12:08:03.816442 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:03 crc kubenswrapper[4753]: I0129 12:08:03.816461 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:03 crc kubenswrapper[4753]: I0129 12:08:03.816471 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:03Z","lastTransitionTime":"2026-01-29T12:08:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:03 crc kubenswrapper[4753]: I0129 12:08:03.889002 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 12:08:03 crc kubenswrapper[4753]: E0129 12:08:03.889366 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 12:08:03 crc kubenswrapper[4753]: I0129 12:08:03.920045 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:03 crc kubenswrapper[4753]: I0129 12:08:03.920119 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:03 crc kubenswrapper[4753]: I0129 12:08:03.920130 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:03 crc kubenswrapper[4753]: I0129 12:08:03.920151 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:03 crc kubenswrapper[4753]: I0129 12:08:03.920164 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:03Z","lastTransitionTime":"2026-01-29T12:08:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.023321 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.023421 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.023435 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.023479 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.023492 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:04Z","lastTransitionTime":"2026-01-29T12:08:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.127148 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.127263 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.127291 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.127323 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.127344 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:04Z","lastTransitionTime":"2026-01-29T12:08:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.473867 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.473944 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.473959 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.473980 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.474010 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:04Z","lastTransitionTime":"2026-01-29T12:08:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.475944 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.476093 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.476197 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.476310 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.476417 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:04Z","lastTransitionTime":"2026-01-29T12:08:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:08:04 crc kubenswrapper[4753]: E0129 12:08:04.493455 4753 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:08:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:08:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:08:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:08:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:08:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:08:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:08:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:08:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7536a030-b66d-4df9-a1ec-9890f7ad99e7\\\",\\\"systemUUID\\\":\\\"ce89c1ce-259c-4b78-b348-3ef96afb6944\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:08:04Z is after 2025-08-24T17:21:41Z" Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.498454 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.498518 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.498535 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.498557 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.498567 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:04Z","lastTransitionTime":"2026-01-29T12:08:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:04 crc kubenswrapper[4753]: E0129 12:08:04.517138 4753 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:08:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:08:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:08:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:08:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:08:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:08:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:08:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:08:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7536a030-b66d-4df9-a1ec-9890f7ad99e7\\\",\\\"systemUUID\\\":\\\"ce89c1ce-259c-4b78-b348-3ef96afb6944\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:08:04Z is after 2025-08-24T17:21:41Z" Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.522253 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.522309 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.522319 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.522339 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.522350 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:04Z","lastTransitionTime":"2026-01-29T12:08:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:04 crc kubenswrapper[4753]: E0129 12:08:04.538579 4753 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:08:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:08:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:08:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:08:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:08:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:08:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:08:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:08:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7536a030-b66d-4df9-a1ec-9890f7ad99e7\\\",\\\"systemUUID\\\":\\\"ce89c1ce-259c-4b78-b348-3ef96afb6944\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:08:04Z is after 2025-08-24T17:21:41Z" Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.607320 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.607725 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.607738 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.607761 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.607776 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:04Z","lastTransitionTime":"2026-01-29T12:08:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:04 crc kubenswrapper[4753]: E0129 12:08:04.624348 4753 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:08:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:08:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:08:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:08:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:08:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:08:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:08:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:08:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7536a030-b66d-4df9-a1ec-9890f7ad99e7\\\",\\\"systemUUID\\\":\\\"ce89c1ce-259c-4b78-b348-3ef96afb6944\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:08:04Z is after 2025-08-24T17:21:41Z" Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.629324 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.629559 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.629662 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.629760 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.629861 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:04Z","lastTransitionTime":"2026-01-29T12:08:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:04 crc kubenswrapper[4753]: E0129 12:08:04.645420 4753 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:08:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:08:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:08:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:08:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:08:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:08:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T12:08:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T12:08:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7536a030-b66d-4df9-a1ec-9890f7ad99e7\\\",\\\"systemUUID\\\":\\\"ce89c1ce-259c-4b78-b348-3ef96afb6944\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T12:08:04Z is after 2025-08-24T17:21:41Z" Jan 29 12:08:04 crc kubenswrapper[4753]: E0129 12:08:04.646151 4753 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.648346 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.648516 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.648674 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.648816 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.648982 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:04Z","lastTransitionTime":"2026-01-29T12:08:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.666001 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-03 17:56:09.795039589 +0000 UTC Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.752697 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.753118 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.753282 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.753444 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.753605 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:04Z","lastTransitionTime":"2026-01-29T12:08:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.753605 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:04Z","lastTransitionTime":"2026-01-29T12:08:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.856169 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.856213 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.856236 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.856251 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.856260 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:04Z","lastTransitionTime":"2026-01-29T12:08:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.888439 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.888439 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p6m5g"
Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.888609 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 29 12:08:04 crc kubenswrapper[4753]: E0129 12:08:04.888820 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 29 12:08:04 crc kubenswrapper[4753]: E0129 12:08:04.888979 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p6m5g" podUID="2412fa08-e643-4225-b494-eb999ea93fce"
Jan 29 12:08:04 crc kubenswrapper[4753]: E0129 12:08:04.889036 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.960609 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.961085 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.961269 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.961432 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:04 crc kubenswrapper[4753]: I0129 12:08:04.961562 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:04Z","lastTransitionTime":"2026-01-29T12:08:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:05 crc kubenswrapper[4753]: I0129 12:08:05.065409 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:05 crc kubenswrapper[4753]: I0129 12:08:05.065469 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:05 crc kubenswrapper[4753]: I0129 12:08:05.065483 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:05 crc kubenswrapper[4753]: I0129 12:08:05.065505 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:05 crc kubenswrapper[4753]: I0129 12:08:05.065519 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:05Z","lastTransitionTime":"2026-01-29T12:08:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:08:05 crc kubenswrapper[4753]: I0129 12:08:05.170440 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:05 crc kubenswrapper[4753]: I0129 12:08:05.170524 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:05 crc kubenswrapper[4753]: I0129 12:08:05.170549 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:05 crc kubenswrapper[4753]: I0129 12:08:05.170588 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:05 crc kubenswrapper[4753]: I0129 12:08:05.170617 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:05Z","lastTransitionTime":"2026-01-29T12:08:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:05 crc kubenswrapper[4753]: I0129 12:08:05.273551 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:05 crc kubenswrapper[4753]: I0129 12:08:05.273602 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:05 crc kubenswrapper[4753]: I0129 12:08:05.273619 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:05 crc kubenswrapper[4753]: I0129 12:08:05.273646 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:05 crc kubenswrapper[4753]: I0129 12:08:05.273667 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:05Z","lastTransitionTime":"2026-01-29T12:08:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:05 crc kubenswrapper[4753]: I0129 12:08:05.376746 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:05 crc kubenswrapper[4753]: I0129 12:08:05.376794 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:05 crc kubenswrapper[4753]: I0129 12:08:05.376806 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:05 crc kubenswrapper[4753]: I0129 12:08:05.376826 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:05 crc kubenswrapper[4753]: I0129 12:08:05.376839 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:05Z","lastTransitionTime":"2026-01-29T12:08:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:08:05 crc kubenswrapper[4753]: I0129 12:08:05.479869 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:05 crc kubenswrapper[4753]: I0129 12:08:05.479928 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:05 crc kubenswrapper[4753]: I0129 12:08:05.479940 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:05 crc kubenswrapper[4753]: I0129 12:08:05.479960 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:05 crc kubenswrapper[4753]: I0129 12:08:05.479976 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:05Z","lastTransitionTime":"2026-01-29T12:08:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:05 crc kubenswrapper[4753]: I0129 12:08:05.582757 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:05 crc kubenswrapper[4753]: I0129 12:08:05.582824 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:05 crc kubenswrapper[4753]: I0129 12:08:05.582841 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:05 crc kubenswrapper[4753]: I0129 12:08:05.582866 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:05 crc kubenswrapper[4753]: I0129 12:08:05.582886 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:05Z","lastTransitionTime":"2026-01-29T12:08:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:08:05 crc kubenswrapper[4753]: I0129 12:08:05.666984 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-17 10:40:28.833626197 +0000 UTC Jan 29 12:08:05 crc kubenswrapper[4753]: I0129 12:08:05.685093 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:05 crc kubenswrapper[4753]: I0129 12:08:05.685137 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:05 crc kubenswrapper[4753]: I0129 12:08:05.685147 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:05 crc kubenswrapper[4753]: I0129 12:08:05.685165 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:05 crc kubenswrapper[4753]: I0129 12:08:05.685177 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:05Z","lastTransitionTime":"2026-01-29T12:08:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:05 crc kubenswrapper[4753]: I0129 12:08:05.789026 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:05 crc kubenswrapper[4753]: I0129 12:08:05.789120 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:05 crc kubenswrapper[4753]: I0129 12:08:05.789158 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:05 crc kubenswrapper[4753]: I0129 12:08:05.789193 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:05 crc kubenswrapper[4753]: I0129 12:08:05.789211 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:05Z","lastTransitionTime":"2026-01-29T12:08:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:05 crc kubenswrapper[4753]: I0129 12:08:05.888545 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 12:08:05 crc kubenswrapper[4753]: E0129 12:08:05.888910 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 12:08:05 crc kubenswrapper[4753]: I0129 12:08:05.892789 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:05 crc kubenswrapper[4753]: I0129 12:08:05.892842 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:05 crc kubenswrapper[4753]: I0129 12:08:05.892855 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:05 crc kubenswrapper[4753]: I0129 12:08:05.892873 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:05 crc kubenswrapper[4753]: I0129 12:08:05.892890 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:05Z","lastTransitionTime":"2026-01-29T12:08:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:05 crc kubenswrapper[4753]: I0129 12:08:05.995842 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:05 crc kubenswrapper[4753]: I0129 12:08:05.995907 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:05 crc kubenswrapper[4753]: I0129 12:08:05.995927 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:05 crc kubenswrapper[4753]: I0129 12:08:05.995950 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:05 crc kubenswrapper[4753]: I0129 12:08:05.995963 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:05Z","lastTransitionTime":"2026-01-29T12:08:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:08:06 crc kubenswrapper[4753]: I0129 12:08:06.098953 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:06 crc kubenswrapper[4753]: I0129 12:08:06.098999 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:06 crc kubenswrapper[4753]: I0129 12:08:06.099023 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:06 crc kubenswrapper[4753]: I0129 12:08:06.099042 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:06 crc kubenswrapper[4753]: I0129 12:08:06.099051 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:06Z","lastTransitionTime":"2026-01-29T12:08:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:06 crc kubenswrapper[4753]: I0129 12:08:06.202735 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:06 crc kubenswrapper[4753]: I0129 12:08:06.202806 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:06 crc kubenswrapper[4753]: I0129 12:08:06.202817 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:06 crc kubenswrapper[4753]: I0129 12:08:06.202836 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:06 crc kubenswrapper[4753]: I0129 12:08:06.202855 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:06Z","lastTransitionTime":"2026-01-29T12:08:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:06 crc kubenswrapper[4753]: I0129 12:08:06.305829 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:06 crc kubenswrapper[4753]: I0129 12:08:06.305877 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:06 crc kubenswrapper[4753]: I0129 12:08:06.305886 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:06 crc kubenswrapper[4753]: I0129 12:08:06.305903 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:06 crc kubenswrapper[4753]: I0129 12:08:06.305913 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:06Z","lastTransitionTime":"2026-01-29T12:08:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:08:06 crc kubenswrapper[4753]: I0129 12:08:06.409135 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:06 crc kubenswrapper[4753]: I0129 12:08:06.409206 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:06 crc kubenswrapper[4753]: I0129 12:08:06.409256 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:06 crc kubenswrapper[4753]: I0129 12:08:06.409280 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:06 crc kubenswrapper[4753]: I0129 12:08:06.409294 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:06Z","lastTransitionTime":"2026-01-29T12:08:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:06 crc kubenswrapper[4753]: I0129 12:08:06.513374 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:06 crc kubenswrapper[4753]: I0129 12:08:06.513432 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:06 crc kubenswrapper[4753]: I0129 12:08:06.513443 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:06 crc kubenswrapper[4753]: I0129 12:08:06.513460 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:06 crc kubenswrapper[4753]: I0129 12:08:06.513469 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:06Z","lastTransitionTime":"2026-01-29T12:08:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:06 crc kubenswrapper[4753]: I0129 12:08:06.616099 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:06 crc kubenswrapper[4753]: I0129 12:08:06.616142 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:06 crc kubenswrapper[4753]: I0129 12:08:06.616153 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:06 crc kubenswrapper[4753]: I0129 12:08:06.616170 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:06 crc kubenswrapper[4753]: I0129 12:08:06.616181 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:06Z","lastTransitionTime":"2026-01-29T12:08:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:08:06 crc kubenswrapper[4753]: I0129 12:08:06.667601 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-26 19:36:23.192967133 +0000 UTC Jan 29 12:08:06 crc kubenswrapper[4753]: I0129 12:08:06.719804 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:06 crc kubenswrapper[4753]: I0129 12:08:06.719868 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:06 crc kubenswrapper[4753]: I0129 12:08:06.719884 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:06 crc kubenswrapper[4753]: I0129 12:08:06.719912 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:06 crc kubenswrapper[4753]: I0129 12:08:06.719964 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:06Z","lastTransitionTime":"2026-01-29T12:08:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:06 crc kubenswrapper[4753]: I0129 12:08:06.822092 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:06 crc kubenswrapper[4753]: I0129 12:08:06.822146 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:06 crc kubenswrapper[4753]: I0129 12:08:06.822162 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:06 crc kubenswrapper[4753]: I0129 12:08:06.822180 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:06 crc kubenswrapper[4753]: I0129 12:08:06.822193 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:06Z","lastTransitionTime":"2026-01-29T12:08:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:06 crc kubenswrapper[4753]: I0129 12:08:06.888025 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 12:08:06 crc kubenswrapper[4753]: I0129 12:08:06.888102 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 12:08:06 crc kubenswrapper[4753]: I0129 12:08:06.888176 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-p6m5g"
Jan 29 12:08:06 crc kubenswrapper[4753]: E0129 12:08:06.888361 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 29 12:08:06 crc kubenswrapper[4753]: E0129 12:08:06.888484 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 29 12:08:06 crc kubenswrapper[4753]: E0129 12:08:06.888626 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p6m5g" podUID="2412fa08-e643-4225-b494-eb999ea93fce"
Jan 29 12:08:06 crc kubenswrapper[4753]: I0129 12:08:06.924844 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:08:06 crc kubenswrapper[4753]: I0129 12:08:06.924879 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:08:06 crc kubenswrapper[4753]: I0129 12:08:06.924888 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:08:06 crc kubenswrapper[4753]: I0129 12:08:06.924904 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
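
Every "Node became not ready" condition above carries the same message: the runtime reports NetworkReady=false until at least one CNI configuration file appears in /etc/kubernetes/cni/net.d/. A small Go sketch of that directory probe; the extension list (.conf, .conflist, .json) is an assumption about what the CNI config loader accepts, not the runtime's exact matcher:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	confDir := "/etc/kubernetes/cni/net.d"
	var found []string
	// Patterns are fixed and valid, so filepath.Glob cannot fail here.
	for _, pat := range []string{"*.conf", "*.conflist", "*.json"} {
		matches, _ := filepath.Glob(filepath.Join(confDir, pat))
		found = append(found, matches...)
	}
	if len(found) == 0 {
		fmt.Printf("NetworkReady=false: no CNI configuration file in %s\n", confDir)
		os.Exit(1)
	}
	fmt.Println("NetworkReady=true:", found)
}

On this node the directory stays empty until the network provider writes its config, which is why the same condition repeats on every status pass and the dependent pods keep failing to sync.
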
Jan 29 12:08:06 crc kubenswrapper[4753]: I0129 12:08:06.924914 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:06Z","lastTransitionTime":"2026-01-29T12:08:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:08:07 crc kubenswrapper[4753]: I0129 12:08:07.028479 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:08:07 crc kubenswrapper[4753]: I0129 12:08:07.028561 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:08:07 crc kubenswrapper[4753]: I0129 12:08:07.028578 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:08:07 crc kubenswrapper[4753]: I0129 12:08:07.028602 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:08:07 crc kubenswrapper[4753]: I0129 12:08:07.028617 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:07Z","lastTransitionTime":"2026-01-29T12:08:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:08:07 crc kubenswrapper[4753]: I0129 12:08:07.131216 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:08:07 crc kubenswrapper[4753]: I0129 12:08:07.131317 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:08:07 crc kubenswrapper[4753]: I0129 12:08:07.131331 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:08:07 crc kubenswrapper[4753]: I0129 12:08:07.131352 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:08:07 crc kubenswrapper[4753]: I0129 12:08:07.131367 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:07Z","lastTransitionTime":"2026-01-29T12:08:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:08:07 crc kubenswrapper[4753]: I0129 12:08:07.234715 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:08:07 crc kubenswrapper[4753]: I0129 12:08:07.234807 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:08:07 crc kubenswrapper[4753]: I0129 12:08:07.234833 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:08:07 crc kubenswrapper[4753]: I0129 12:08:07.234859 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:08:07 crc kubenswrapper[4753]: I0129 12:08:07.234876 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:07Z","lastTransitionTime":"2026-01-29T12:08:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:08:07 crc kubenswrapper[4753]: I0129 12:08:07.337943 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:07 crc kubenswrapper[4753]: I0129 12:08:07.337990 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:07 crc kubenswrapper[4753]: I0129 12:08:07.338001 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:07 crc kubenswrapper[4753]: I0129 12:08:07.338019 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:07 crc kubenswrapper[4753]: I0129 12:08:07.338031 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:07Z","lastTransitionTime":"2026-01-29T12:08:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:07 crc kubenswrapper[4753]: I0129 12:08:07.441520 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:07 crc kubenswrapper[4753]: I0129 12:08:07.441566 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:07 crc kubenswrapper[4753]: I0129 12:08:07.441576 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:07 crc kubenswrapper[4753]: I0129 12:08:07.441593 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:07 crc kubenswrapper[4753]: I0129 12:08:07.441604 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:07Z","lastTransitionTime":"2026-01-29T12:08:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:07 crc kubenswrapper[4753]: I0129 12:08:07.544208 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:07 crc kubenswrapper[4753]: I0129 12:08:07.544295 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:07 crc kubenswrapper[4753]: I0129 12:08:07.544311 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:07 crc kubenswrapper[4753]: I0129 12:08:07.544332 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:07 crc kubenswrapper[4753]: I0129 12:08:07.544343 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:07Z","lastTransitionTime":"2026-01-29T12:08:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 29 12:08:07 crc kubenswrapper[4753]: I0129 12:08:07.646269 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:08:07 crc kubenswrapper[4753]: I0129 12:08:07.646310 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:08:07 crc kubenswrapper[4753]: I0129 12:08:07.646320 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:08:07 crc kubenswrapper[4753]: I0129 12:08:07.646339 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:08:07 crc kubenswrapper[4753]: I0129 12:08:07.646349 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:07Z","lastTransitionTime":"2026-01-29T12:08:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:08:07 crc kubenswrapper[4753]: I0129 12:08:07.668561 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-29 02:12:49.56647046 +0000 UTC
Jan 29 12:08:07 crc kubenswrapper[4753]: I0129 12:08:07.749032 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:08:07 crc kubenswrapper[4753]: I0129 12:08:07.749087 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:08:07 crc kubenswrapper[4753]: I0129 12:08:07.749095 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:08:07 crc kubenswrapper[4753]: I0129 12:08:07.749114 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
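
The certificate_manager records above always log the same expiration (2026-02-24 05:53:03 +0000 UTC) but a different rotation deadline on each pass: the kubelet re-draws a random deadline inside the tail of the certificate's validity window every time it evaluates rotation. A sketch of that computation, assuming client-go's roughly 70-90% jitter bounds and a one-year validity window (both assumptions, though consistent with the deadlines in the log):

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// rotationDeadline picks a random point roughly 70%-90% of the way through
// the certificate's validity window, mimicking the kubelet's jitter.
func rotationDeadline(notBefore, notAfter time.Time) time.Time {
	total := float64(notAfter.Sub(notBefore))
	return notBefore.Add(time.Duration(total * (0.7 + 0.2*rand.Float64())))
}

func main() {
	notAfter, _ := time.Parse("2006-01-02 15:04:05 -0700 MST", "2026-02-24 05:53:03 +0000 UTC")
	notBefore := notAfter.AddDate(-1, 0, 0) // assumed one-year validity
	for i := 0; i < 3; i++ {
		// Each draw lands somewhere else, as in the log above.
		fmt.Println("rotation deadline is", rotationDeadline(notBefore, notAfter).UTC())
	}
}

All four deadlines logged so far (2025-11-17, 2025-11-26, 2025-12-03, 2025-12-29) fall inside that window, and all are already in the past relative to the node's clock.
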
Jan 29 12:08:07 crc kubenswrapper[4753]: I0129 12:08:07.749123 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:07Z","lastTransitionTime":"2026-01-29T12:08:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:08:07 crc kubenswrapper[4753]: I0129 12:08:07.851766 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 12:08:07 crc kubenswrapper[4753]: I0129 12:08:07.851803 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 12:08:07 crc kubenswrapper[4753]: I0129 12:08:07.851812 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 12:08:07 crc kubenswrapper[4753]: I0129 12:08:07.851826 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 12:08:07 crc kubenswrapper[4753]: I0129 12:08:07.851835 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:07Z","lastTransitionTime":"2026-01-29T12:08:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 12:08:07 crc kubenswrapper[4753]: I0129 12:08:07.888277 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 12:08:07 crc kubenswrapper[4753]: I0129 12:08:07.944921 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=42.944873923 podStartE2EDuration="42.944873923s" podCreationTimestamp="2026-01-29 12:07:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:08:07.920716069 +0000 UTC m=+102.172797534" watchObservedRunningTime="2026-01-29 12:08:07.944873923 +0000 UTC m=+102.196955378" Jan 29 12:08:07 crc kubenswrapper[4753]: I0129 12:08:07.954114 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:07 crc kubenswrapper[4753]: I0129 12:08:07.954157 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:07 crc kubenswrapper[4753]: I0129 12:08:07.954167 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:07 crc kubenswrapper[4753]: I0129 12:08:07.954183 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:07 crc kubenswrapper[4753]: I0129 12:08:07.954193 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:07Z","lastTransitionTime":"2026-01-29T12:08:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:08:07 crc kubenswrapper[4753]: I0129 12:08:07.962092 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-5m2jf" podStartSLOduration=70.962055601 podStartE2EDuration="1m10.962055601s" podCreationTimestamp="2026-01-29 12:06:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:08:07.961160405 +0000 UTC m=+102.213241860" watchObservedRunningTime="2026-01-29 12:08:07.962055601 +0000 UTC m=+102.214137056" Jan 29 12:08:07 crc kubenswrapper[4753]: I0129 12:08:07.996423 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=18.996383606 podStartE2EDuration="18.996383606s" podCreationTimestamp="2026-01-29 12:07:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:08:07.995525561 +0000 UTC m=+102.247607036" watchObservedRunningTime="2026-01-29 12:08:07.996383606 +0000 UTC m=+102.248465081" Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.025557 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=74.025525097 podStartE2EDuration="1m14.025525097s" podCreationTimestamp="2026-01-29 12:06:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:08:08.022943271 +0000 UTC m=+102.275024746" watchObservedRunningTime="2026-01-29 12:08:08.025525097 +0000 UTC m=+102.277606552" Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.058558 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.058607 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.058616 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.058636 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.058647 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:08Z","lastTransitionTime":"2026-01-29T12:08:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.094127 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-x8kzr" podStartSLOduration=71.094100555 podStartE2EDuration="1m11.094100555s" podCreationTimestamp="2026-01-29 12:06:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:08:08.070009392 +0000 UTC m=+102.322091067" watchObservedRunningTime="2026-01-29 12:08:08.094100555 +0000 UTC m=+102.346182010" Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.094640 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-vwcjk" podStartSLOduration=70.09463201 podStartE2EDuration="1m10.09463201s" podCreationTimestamp="2026-01-29 12:06:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:08:08.093842877 +0000 UTC m=+102.345924332" watchObservedRunningTime="2026-01-29 12:08:08.09463201 +0000 UTC m=+102.346713465" Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.118819 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-rnbz9" podStartSLOduration=70.118789854 podStartE2EDuration="1m10.118789854s" podCreationTimestamp="2026-01-29 12:06:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:08:08.117365272 +0000 UTC m=+102.369446727" watchObservedRunningTime="2026-01-29 12:08:08.118789854 +0000 UTC m=+102.370871309" Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.162265 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.162330 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.162341 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.162362 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.162378 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:08Z","lastTransitionTime":"2026-01-29T12:08:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.172987 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=72.172951596 podStartE2EDuration="1m12.172951596s" podCreationTimestamp="2026-01-29 12:06:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:08:08.171440601 +0000 UTC m=+102.423522056" watchObservedRunningTime="2026-01-29 12:08:08.172951596 +0000 UTC m=+102.425033051" Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.173481 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-9dtck" podStartSLOduration=69.173468291 podStartE2EDuration="1m9.173468291s" podCreationTimestamp="2026-01-29 12:06:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:08:08.140018692 +0000 UTC m=+102.392100157" watchObservedRunningTime="2026-01-29 12:08:08.173468291 +0000 UTC m=+102.425549756" Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.264712 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.264759 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.264769 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.264788 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.264801 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:08Z","lastTransitionTime":"2026-01-29T12:08:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.276312 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podStartSLOduration=71.27628859 podStartE2EDuration="1m11.27628859s" podCreationTimestamp="2026-01-29 12:06:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:08:08.275648871 +0000 UTC m=+102.527730326" watchObservedRunningTime="2026-01-29 12:08:08.27628859 +0000 UTC m=+102.528370045" Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.367360 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.367442 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.367454 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.367475 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.367507 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:08Z","lastTransitionTime":"2026-01-29T12:08:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.470887 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.470938 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.470954 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.470977 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.470993 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:08Z","lastTransitionTime":"2026-01-29T12:08:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.574014 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.574056 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.574066 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.574082 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.574091 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:08Z","lastTransitionTime":"2026-01-29T12:08:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.669782 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-15 18:49:55.872512388 +0000 UTC Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.676926 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.676986 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.676999 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.677021 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.677037 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:08Z","lastTransitionTime":"2026-01-29T12:08:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.779960 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.780005 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.780018 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.780040 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.780052 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:08Z","lastTransitionTime":"2026-01-29T12:08:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.882333 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.882378 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.882389 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.882407 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.882418 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:08Z","lastTransitionTime":"2026-01-29T12:08:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.887649 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p6m5g" Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.887672 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.887652 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 12:08:08 crc kubenswrapper[4753]: E0129 12:08:08.887841 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 12:08:08 crc kubenswrapper[4753]: E0129 12:08:08.887760 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p6m5g" podUID="2412fa08-e643-4225-b494-eb999ea93fce" Jan 29 12:08:08 crc kubenswrapper[4753]: E0129 12:08:08.887964 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.984687 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.984741 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.984753 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.984769 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:08 crc kubenswrapper[4753]: I0129 12:08:08.984779 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:08Z","lastTransitionTime":"2026-01-29T12:08:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:09 crc kubenswrapper[4753]: I0129 12:08:09.086683 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:09 crc kubenswrapper[4753]: I0129 12:08:09.086742 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:09 crc kubenswrapper[4753]: I0129 12:08:09.086754 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:09 crc kubenswrapper[4753]: I0129 12:08:09.086776 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:09 crc kubenswrapper[4753]: I0129 12:08:09.086789 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:09Z","lastTransitionTime":"2026-01-29T12:08:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:08:09 crc kubenswrapper[4753]: I0129 12:08:09.189471 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:09 crc kubenswrapper[4753]: I0129 12:08:09.189527 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:09 crc kubenswrapper[4753]: I0129 12:08:09.189541 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:09 crc kubenswrapper[4753]: I0129 12:08:09.189561 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:09 crc kubenswrapper[4753]: I0129 12:08:09.189576 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:09Z","lastTransitionTime":"2026-01-29T12:08:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:09 crc kubenswrapper[4753]: I0129 12:08:09.296811 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:09 crc kubenswrapper[4753]: I0129 12:08:09.296860 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:09 crc kubenswrapper[4753]: I0129 12:08:09.296870 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:09 crc kubenswrapper[4753]: I0129 12:08:09.296892 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:09 crc kubenswrapper[4753]: I0129 12:08:09.296903 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:09Z","lastTransitionTime":"2026-01-29T12:08:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:09 crc kubenswrapper[4753]: I0129 12:08:09.400726 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:09 crc kubenswrapper[4753]: I0129 12:08:09.401442 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:09 crc kubenswrapper[4753]: I0129 12:08:09.401484 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:09 crc kubenswrapper[4753]: I0129 12:08:09.401508 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:09 crc kubenswrapper[4753]: I0129 12:08:09.401521 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:09Z","lastTransitionTime":"2026-01-29T12:08:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:08:09 crc kubenswrapper[4753]: I0129 12:08:09.504266 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:09 crc kubenswrapper[4753]: I0129 12:08:09.504304 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:09 crc kubenswrapper[4753]: I0129 12:08:09.504313 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:09 crc kubenswrapper[4753]: I0129 12:08:09.504331 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:09 crc kubenswrapper[4753]: I0129 12:08:09.504340 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:09Z","lastTransitionTime":"2026-01-29T12:08:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:09 crc kubenswrapper[4753]: I0129 12:08:09.607681 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:09 crc kubenswrapper[4753]: I0129 12:08:09.607733 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:09 crc kubenswrapper[4753]: I0129 12:08:09.607752 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:09 crc kubenswrapper[4753]: I0129 12:08:09.607771 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:09 crc kubenswrapper[4753]: I0129 12:08:09.607781 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:09Z","lastTransitionTime":"2026-01-29T12:08:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:08:09 crc kubenswrapper[4753]: I0129 12:08:09.670693 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-30 11:11:01.79191532 +0000 UTC Jan 29 12:08:09 crc kubenswrapper[4753]: I0129 12:08:09.711482 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:09 crc kubenswrapper[4753]: I0129 12:08:09.711542 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:09 crc kubenswrapper[4753]: I0129 12:08:09.711553 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:09 crc kubenswrapper[4753]: I0129 12:08:09.711568 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:09 crc kubenswrapper[4753]: I0129 12:08:09.711577 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:09Z","lastTransitionTime":"2026-01-29T12:08:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:09 crc kubenswrapper[4753]: I0129 12:08:09.814694 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:09 crc kubenswrapper[4753]: I0129 12:08:09.814790 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:09 crc kubenswrapper[4753]: I0129 12:08:09.814816 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:09 crc kubenswrapper[4753]: I0129 12:08:09.814848 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:09 crc kubenswrapper[4753]: I0129 12:08:09.814870 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:09Z","lastTransitionTime":"2026-01-29T12:08:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:09 crc kubenswrapper[4753]: I0129 12:08:09.888591 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 12:08:09 crc kubenswrapper[4753]: E0129 12:08:09.888800 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 12:08:09 crc kubenswrapper[4753]: I0129 12:08:09.889583 4753 scope.go:117] "RemoveContainer" containerID="e7138397dae5e90b30b0cdec02d29c108d423b3c94b07ea63e1560c4f02ed607" Jan 29 12:08:09 crc kubenswrapper[4753]: I0129 12:08:09.919829 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:09 crc kubenswrapper[4753]: I0129 12:08:09.919891 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:09 crc kubenswrapper[4753]: I0129 12:08:09.919904 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:09 crc kubenswrapper[4753]: I0129 12:08:09.919927 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:09 crc kubenswrapper[4753]: I0129 12:08:09.919942 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:09Z","lastTransitionTime":"2026-01-29T12:08:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:10 crc kubenswrapper[4753]: I0129 12:08:10.023238 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:10 crc kubenswrapper[4753]: I0129 12:08:10.023310 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:10 crc kubenswrapper[4753]: I0129 12:08:10.023323 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:10 crc kubenswrapper[4753]: I0129 12:08:10.023356 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:10 crc kubenswrapper[4753]: I0129 12:08:10.023380 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:10Z","lastTransitionTime":"2026-01-29T12:08:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:08:10 crc kubenswrapper[4753]: I0129 12:08:10.133134 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:10 crc kubenswrapper[4753]: I0129 12:08:10.133219 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:10 crc kubenswrapper[4753]: I0129 12:08:10.133256 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:10 crc kubenswrapper[4753]: I0129 12:08:10.133279 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:10 crc kubenswrapper[4753]: I0129 12:08:10.133291 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:10Z","lastTransitionTime":"2026-01-29T12:08:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:10 crc kubenswrapper[4753]: I0129 12:08:10.235969 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:10 crc kubenswrapper[4753]: I0129 12:08:10.236018 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:10 crc kubenswrapper[4753]: I0129 12:08:10.236200 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:10 crc kubenswrapper[4753]: I0129 12:08:10.236328 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:10 crc kubenswrapper[4753]: I0129 12:08:10.236355 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:10Z","lastTransitionTime":"2026-01-29T12:08:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:10 crc kubenswrapper[4753]: I0129 12:08:10.339735 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:10 crc kubenswrapper[4753]: I0129 12:08:10.339772 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:10 crc kubenswrapper[4753]: I0129 12:08:10.339782 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:10 crc kubenswrapper[4753]: I0129 12:08:10.339799 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:10 crc kubenswrapper[4753]: I0129 12:08:10.339809 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:10Z","lastTransitionTime":"2026-01-29T12:08:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:08:10 crc kubenswrapper[4753]: I0129 12:08:10.442684 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:10 crc kubenswrapper[4753]: I0129 12:08:10.442798 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:10 crc kubenswrapper[4753]: I0129 12:08:10.442814 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:10 crc kubenswrapper[4753]: I0129 12:08:10.442873 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:10 crc kubenswrapper[4753]: I0129 12:08:10.442893 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:10Z","lastTransitionTime":"2026-01-29T12:08:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:10 crc kubenswrapper[4753]: I0129 12:08:10.546432 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:10 crc kubenswrapper[4753]: I0129 12:08:10.546489 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:10 crc kubenswrapper[4753]: I0129 12:08:10.546500 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:10 crc kubenswrapper[4753]: I0129 12:08:10.546526 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:10 crc kubenswrapper[4753]: I0129 12:08:10.546541 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:10Z","lastTransitionTime":"2026-01-29T12:08:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:10 crc kubenswrapper[4753]: I0129 12:08:10.650128 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:10 crc kubenswrapper[4753]: I0129 12:08:10.650198 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:10 crc kubenswrapper[4753]: I0129 12:08:10.650212 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:10 crc kubenswrapper[4753]: I0129 12:08:10.650261 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:10 crc kubenswrapper[4753]: I0129 12:08:10.650277 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:10Z","lastTransitionTime":"2026-01-29T12:08:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:08:10 crc kubenswrapper[4753]: I0129 12:08:10.671946 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-24 04:57:27.814920768 +0000 UTC Jan 29 12:08:10 crc kubenswrapper[4753]: I0129 12:08:10.752849 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:10 crc kubenswrapper[4753]: I0129 12:08:10.752919 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:10 crc kubenswrapper[4753]: I0129 12:08:10.752936 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:10 crc kubenswrapper[4753]: I0129 12:08:10.752958 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:10 crc kubenswrapper[4753]: I0129 12:08:10.752971 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:10Z","lastTransitionTime":"2026-01-29T12:08:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:10 crc kubenswrapper[4753]: I0129 12:08:10.855558 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:10 crc kubenswrapper[4753]: I0129 12:08:10.855633 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:10 crc kubenswrapper[4753]: I0129 12:08:10.855659 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:10 crc kubenswrapper[4753]: I0129 12:08:10.855684 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:10 crc kubenswrapper[4753]: I0129 12:08:10.855701 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:10Z","lastTransitionTime":"2026-01-29T12:08:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:10 crc kubenswrapper[4753]: I0129 12:08:10.888017 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p6m5g" Jan 29 12:08:10 crc kubenswrapper[4753]: I0129 12:08:10.888193 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 12:08:10 crc kubenswrapper[4753]: I0129 12:08:10.888328 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 12:08:10 crc kubenswrapper[4753]: E0129 12:08:10.888202 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p6m5g" podUID="2412fa08-e643-4225-b494-eb999ea93fce" Jan 29 12:08:10 crc kubenswrapper[4753]: E0129 12:08:10.888422 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 12:08:10 crc kubenswrapper[4753]: E0129 12:08:10.888520 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 12:08:10 crc kubenswrapper[4753]: I0129 12:08:10.958705 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:10 crc kubenswrapper[4753]: I0129 12:08:10.958750 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:10 crc kubenswrapper[4753]: I0129 12:08:10.958761 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:10 crc kubenswrapper[4753]: I0129 12:08:10.958778 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:10 crc kubenswrapper[4753]: I0129 12:08:10.958789 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:10Z","lastTransitionTime":"2026-01-29T12:08:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:08:10 crc kubenswrapper[4753]: I0129 12:08:10.968501 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-nzkvz_80bec2ab-0a88-4818-9339-760edda3b07e/ovnkube-controller/2.log" Jan 29 12:08:10 crc kubenswrapper[4753]: I0129 12:08:10.999713 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" event={"ID":"80bec2ab-0a88-4818-9339-760edda3b07e","Type":"ContainerStarted","Data":"9ee2d75fd4d36ec3e097a8aa1ca55cef7754f5a5ed88d149bcf63e90d0fbf2c9"} Jan 29 12:08:11 crc kubenswrapper[4753]: I0129 12:08:11.000249 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:08:11 crc kubenswrapper[4753]: I0129 12:08:11.061166 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:11 crc kubenswrapper[4753]: I0129 12:08:11.061298 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:11 crc kubenswrapper[4753]: I0129 12:08:11.061317 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:11 crc kubenswrapper[4753]: I0129 12:08:11.061337 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:11 crc kubenswrapper[4753]: I0129 12:08:11.061348 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:11Z","lastTransitionTime":"2026-01-29T12:08:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:11 crc kubenswrapper[4753]: I0129 12:08:11.163966 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:11 crc kubenswrapper[4753]: I0129 12:08:11.164014 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:11 crc kubenswrapper[4753]: I0129 12:08:11.164030 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:11 crc kubenswrapper[4753]: I0129 12:08:11.164072 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:11 crc kubenswrapper[4753]: I0129 12:08:11.164085 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:11Z","lastTransitionTime":"2026-01-29T12:08:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:08:11 crc kubenswrapper[4753]: I0129 12:08:11.267349 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:11 crc kubenswrapper[4753]: I0129 12:08:11.267401 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:11 crc kubenswrapper[4753]: I0129 12:08:11.267413 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:11 crc kubenswrapper[4753]: I0129 12:08:11.267432 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:11 crc kubenswrapper[4753]: I0129 12:08:11.267441 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:11Z","lastTransitionTime":"2026-01-29T12:08:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:11 crc kubenswrapper[4753]: I0129 12:08:11.370021 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:11 crc kubenswrapper[4753]: I0129 12:08:11.370072 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:11 crc kubenswrapper[4753]: I0129 12:08:11.370083 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:11 crc kubenswrapper[4753]: I0129 12:08:11.370099 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:11 crc kubenswrapper[4753]: I0129 12:08:11.370108 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:11Z","lastTransitionTime":"2026-01-29T12:08:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:11 crc kubenswrapper[4753]: I0129 12:08:11.473374 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:11 crc kubenswrapper[4753]: I0129 12:08:11.473436 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:11 crc kubenswrapper[4753]: I0129 12:08:11.473453 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:11 crc kubenswrapper[4753]: I0129 12:08:11.473474 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:11 crc kubenswrapper[4753]: I0129 12:08:11.473487 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:11Z","lastTransitionTime":"2026-01-29T12:08:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:08:11 crc kubenswrapper[4753]: I0129 12:08:11.576559 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:11 crc kubenswrapper[4753]: I0129 12:08:11.576636 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:11 crc kubenswrapper[4753]: I0129 12:08:11.576651 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:11 crc kubenswrapper[4753]: I0129 12:08:11.576672 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:11 crc kubenswrapper[4753]: I0129 12:08:11.576685 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:11Z","lastTransitionTime":"2026-01-29T12:08:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:11 crc kubenswrapper[4753]: I0129 12:08:11.672182 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-11 10:55:48.434172741 +0000 UTC Jan 29 12:08:11 crc kubenswrapper[4753]: I0129 12:08:11.679084 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:11 crc kubenswrapper[4753]: I0129 12:08:11.679155 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:11 crc kubenswrapper[4753]: I0129 12:08:11.679170 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:11 crc kubenswrapper[4753]: I0129 12:08:11.679191 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:11 crc kubenswrapper[4753]: I0129 12:08:11.679204 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:11Z","lastTransitionTime":"2026-01-29T12:08:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:08:11 crc kubenswrapper[4753]: I0129 12:08:11.782083 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:11 crc kubenswrapper[4753]: I0129 12:08:11.782136 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:11 crc kubenswrapper[4753]: I0129 12:08:11.782156 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:11 crc kubenswrapper[4753]: I0129 12:08:11.782184 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:11 crc kubenswrapper[4753]: I0129 12:08:11.782201 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:11Z","lastTransitionTime":"2026-01-29T12:08:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:11 crc kubenswrapper[4753]: I0129 12:08:11.890746 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:11 crc kubenswrapper[4753]: I0129 12:08:11.890852 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:11 crc kubenswrapper[4753]: I0129 12:08:11.890877 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:11 crc kubenswrapper[4753]: I0129 12:08:11.890892 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 12:08:11 crc kubenswrapper[4753]: I0129 12:08:11.890906 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:11 crc kubenswrapper[4753]: I0129 12:08:11.891072 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:11Z","lastTransitionTime":"2026-01-29T12:08:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:11 crc kubenswrapper[4753]: E0129 12:08:11.891360 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 12:08:12 crc kubenswrapper[4753]: I0129 12:08:12.000046 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:12 crc kubenswrapper[4753]: I0129 12:08:12.000124 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:12 crc kubenswrapper[4753]: I0129 12:08:12.000150 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:12 crc kubenswrapper[4753]: I0129 12:08:12.000261 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:12 crc kubenswrapper[4753]: I0129 12:08:12.000284 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:12Z","lastTransitionTime":"2026-01-29T12:08:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:12 crc kubenswrapper[4753]: I0129 12:08:12.107412 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:12 crc kubenswrapper[4753]: I0129 12:08:12.107462 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:12 crc kubenswrapper[4753]: I0129 12:08:12.107477 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:12 crc kubenswrapper[4753]: I0129 12:08:12.107501 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:12 crc kubenswrapper[4753]: I0129 12:08:12.107518 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:12Z","lastTransitionTime":"2026-01-29T12:08:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:08:12 crc kubenswrapper[4753]: I0129 12:08:12.210367 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:12 crc kubenswrapper[4753]: I0129 12:08:12.210411 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:12 crc kubenswrapper[4753]: I0129 12:08:12.210421 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:12 crc kubenswrapper[4753]: I0129 12:08:12.210440 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:12 crc kubenswrapper[4753]: I0129 12:08:12.210451 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:12Z","lastTransitionTime":"2026-01-29T12:08:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:12 crc kubenswrapper[4753]: I0129 12:08:12.312990 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:12 crc kubenswrapper[4753]: I0129 12:08:12.313024 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:12 crc kubenswrapper[4753]: I0129 12:08:12.313032 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:12 crc kubenswrapper[4753]: I0129 12:08:12.313047 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:12 crc kubenswrapper[4753]: I0129 12:08:12.313056 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:12Z","lastTransitionTime":"2026-01-29T12:08:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:12 crc kubenswrapper[4753]: I0129 12:08:12.416348 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:12 crc kubenswrapper[4753]: I0129 12:08:12.416397 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:12 crc kubenswrapper[4753]: I0129 12:08:12.416411 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:12 crc kubenswrapper[4753]: I0129 12:08:12.416431 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:12 crc kubenswrapper[4753]: I0129 12:08:12.416445 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:12Z","lastTransitionTime":"2026-01-29T12:08:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:08:12 crc kubenswrapper[4753]: I0129 12:08:12.519462 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:12 crc kubenswrapper[4753]: I0129 12:08:12.519517 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:12 crc kubenswrapper[4753]: I0129 12:08:12.519528 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:12 crc kubenswrapper[4753]: I0129 12:08:12.519548 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:12 crc kubenswrapper[4753]: I0129 12:08:12.519566 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:12Z","lastTransitionTime":"2026-01-29T12:08:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:12 crc kubenswrapper[4753]: I0129 12:08:12.597527 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" podStartSLOduration=74.597500108 podStartE2EDuration="1m14.597500108s" podCreationTimestamp="2026-01-29 12:06:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:08:11.036967397 +0000 UTC m=+105.289048872" watchObservedRunningTime="2026-01-29 12:08:12.597500108 +0000 UTC m=+106.849581563" Jan 29 12:08:12 crc kubenswrapper[4753]: I0129 12:08:12.598646 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-p6m5g"] Jan 29 12:08:12 crc kubenswrapper[4753]: I0129 12:08:12.598816 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p6m5g" Jan 29 12:08:12 crc kubenswrapper[4753]: E0129 12:08:12.598956 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-p6m5g" podUID="2412fa08-e643-4225-b494-eb999ea93fce" Jan 29 12:08:12 crc kubenswrapper[4753]: I0129 12:08:12.622365 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:12 crc kubenswrapper[4753]: I0129 12:08:12.622412 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:12 crc kubenswrapper[4753]: I0129 12:08:12.622422 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:12 crc kubenswrapper[4753]: I0129 12:08:12.622439 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:12 crc kubenswrapper[4753]: I0129 12:08:12.622451 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:12Z","lastTransitionTime":"2026-01-29T12:08:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:12 crc kubenswrapper[4753]: I0129 12:08:12.672561 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-13 04:08:46.234316768 +0000 UTC Jan 29 12:08:12 crc kubenswrapper[4753]: I0129 12:08:12.724971 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:12 crc kubenswrapper[4753]: I0129 12:08:12.725016 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:12 crc kubenswrapper[4753]: I0129 12:08:12.725026 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:12 crc kubenswrapper[4753]: I0129 12:08:12.725048 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:12 crc kubenswrapper[4753]: I0129 12:08:12.725069 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:12Z","lastTransitionTime":"2026-01-29T12:08:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:08:12 crc kubenswrapper[4753]: I0129 12:08:12.829812 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:12 crc kubenswrapper[4753]: I0129 12:08:12.830136 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:12 crc kubenswrapper[4753]: I0129 12:08:12.830150 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:12 crc kubenswrapper[4753]: I0129 12:08:12.830171 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:12 crc kubenswrapper[4753]: I0129 12:08:12.830184 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:12Z","lastTransitionTime":"2026-01-29T12:08:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:12 crc kubenswrapper[4753]: I0129 12:08:12.887783 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 12:08:12 crc kubenswrapper[4753]: I0129 12:08:12.887872 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 12:08:12 crc kubenswrapper[4753]: E0129 12:08:12.888022 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 12:08:12 crc kubenswrapper[4753]: E0129 12:08:12.888368 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 12:08:12 crc kubenswrapper[4753]: I0129 12:08:12.932578 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:12 crc kubenswrapper[4753]: I0129 12:08:12.932647 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:12 crc kubenswrapper[4753]: I0129 12:08:12.932663 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:12 crc kubenswrapper[4753]: I0129 12:08:12.932693 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:12 crc kubenswrapper[4753]: I0129 12:08:12.932719 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:12Z","lastTransitionTime":"2026-01-29T12:08:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:13 crc kubenswrapper[4753]: I0129 12:08:13.035634 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:13 crc kubenswrapper[4753]: I0129 12:08:13.035684 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:13 crc kubenswrapper[4753]: I0129 12:08:13.035700 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:13 crc kubenswrapper[4753]: I0129 12:08:13.035719 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:13 crc kubenswrapper[4753]: I0129 12:08:13.035731 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:13Z","lastTransitionTime":"2026-01-29T12:08:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:08:13 crc kubenswrapper[4753]: I0129 12:08:13.138296 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:13 crc kubenswrapper[4753]: I0129 12:08:13.138384 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:13 crc kubenswrapper[4753]: I0129 12:08:13.138406 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:13 crc kubenswrapper[4753]: I0129 12:08:13.138432 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:13 crc kubenswrapper[4753]: I0129 12:08:13.138451 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:13Z","lastTransitionTime":"2026-01-29T12:08:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:13 crc kubenswrapper[4753]: I0129 12:08:13.241352 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:13 crc kubenswrapper[4753]: I0129 12:08:13.241400 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:13 crc kubenswrapper[4753]: I0129 12:08:13.241417 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:13 crc kubenswrapper[4753]: I0129 12:08:13.241438 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:13 crc kubenswrapper[4753]: I0129 12:08:13.241451 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:13Z","lastTransitionTime":"2026-01-29T12:08:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:13 crc kubenswrapper[4753]: I0129 12:08:13.343951 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:13 crc kubenswrapper[4753]: I0129 12:08:13.344017 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:13 crc kubenswrapper[4753]: I0129 12:08:13.344030 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:13 crc kubenswrapper[4753]: I0129 12:08:13.344052 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:13 crc kubenswrapper[4753]: I0129 12:08:13.344066 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:13Z","lastTransitionTime":"2026-01-29T12:08:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:08:13 crc kubenswrapper[4753]: I0129 12:08:13.447642 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:13 crc kubenswrapper[4753]: I0129 12:08:13.447706 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:13 crc kubenswrapper[4753]: I0129 12:08:13.447718 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:13 crc kubenswrapper[4753]: I0129 12:08:13.447742 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:13 crc kubenswrapper[4753]: I0129 12:08:13.447753 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:13Z","lastTransitionTime":"2026-01-29T12:08:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:13 crc kubenswrapper[4753]: I0129 12:08:13.550959 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:13 crc kubenswrapper[4753]: I0129 12:08:13.551015 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:13 crc kubenswrapper[4753]: I0129 12:08:13.551028 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:13 crc kubenswrapper[4753]: I0129 12:08:13.551056 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:13 crc kubenswrapper[4753]: I0129 12:08:13.551068 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:13Z","lastTransitionTime":"2026-01-29T12:08:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:13 crc kubenswrapper[4753]: I0129 12:08:13.653697 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:13 crc kubenswrapper[4753]: I0129 12:08:13.653744 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:13 crc kubenswrapper[4753]: I0129 12:08:13.653756 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:13 crc kubenswrapper[4753]: I0129 12:08:13.653774 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:13 crc kubenswrapper[4753]: I0129 12:08:13.653787 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:13Z","lastTransitionTime":"2026-01-29T12:08:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:08:13 crc kubenswrapper[4753]: I0129 12:08:13.673549 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-06 02:26:30.704235409 +0000 UTC Jan 29 12:08:13 crc kubenswrapper[4753]: I0129 12:08:13.756268 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:13 crc kubenswrapper[4753]: I0129 12:08:13.756312 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:13 crc kubenswrapper[4753]: I0129 12:08:13.756325 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:13 crc kubenswrapper[4753]: I0129 12:08:13.756347 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:13 crc kubenswrapper[4753]: I0129 12:08:13.756358 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:13Z","lastTransitionTime":"2026-01-29T12:08:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:13 crc kubenswrapper[4753]: I0129 12:08:13.859208 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:13 crc kubenswrapper[4753]: I0129 12:08:13.859302 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:13 crc kubenswrapper[4753]: I0129 12:08:13.859315 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:13 crc kubenswrapper[4753]: I0129 12:08:13.859340 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:13 crc kubenswrapper[4753]: I0129 12:08:13.859353 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:13Z","lastTransitionTime":"2026-01-29T12:08:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:13 crc kubenswrapper[4753]: I0129 12:08:13.887706 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 12:08:13 crc kubenswrapper[4753]: I0129 12:08:13.887816 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p6m5g" Jan 29 12:08:13 crc kubenswrapper[4753]: E0129 12:08:13.888217 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 12:08:13 crc kubenswrapper[4753]: E0129 12:08:13.888292 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p6m5g" podUID="2412fa08-e643-4225-b494-eb999ea93fce" Jan 29 12:08:13 crc kubenswrapper[4753]: I0129 12:08:13.963246 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:13 crc kubenswrapper[4753]: I0129 12:08:13.963290 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:13 crc kubenswrapper[4753]: I0129 12:08:13.963302 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:13 crc kubenswrapper[4753]: I0129 12:08:13.963321 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:13 crc kubenswrapper[4753]: I0129 12:08:13.963332 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:13Z","lastTransitionTime":"2026-01-29T12:08:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.066039 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.066090 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.066103 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.066120 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.066129 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:14Z","lastTransitionTime":"2026-01-29T12:08:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.169088 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.169139 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.169148 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.169168 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.169178 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:14Z","lastTransitionTime":"2026-01-29T12:08:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.271954 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.272010 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.272023 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.272044 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.272056 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:14Z","lastTransitionTime":"2026-01-29T12:08:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.375353 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.375396 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.375404 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.375424 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.375446 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:14Z","lastTransitionTime":"2026-01-29T12:08:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.478743 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.478818 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.478829 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.478858 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.478874 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:14Z","lastTransitionTime":"2026-01-29T12:08:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.582019 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.582088 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.582102 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.582122 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.582138 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:14Z","lastTransitionTime":"2026-01-29T12:08:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.673923 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-16 14:58:00.156771377 +0000 UTC Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.685633 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.685698 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.685710 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.685731 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.685744 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:14Z","lastTransitionTime":"2026-01-29T12:08:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.716472 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.716523 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.716535 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.716555 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.716567 4753 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T12:08:14Z","lastTransitionTime":"2026-01-29T12:08:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.801971 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-q77m6"] Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.802585 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-q77m6" Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.805586 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.805705 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.805816 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.807277 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.876822 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/20fbef6e-603f-424d-9bb3-46d36becb585-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-q77m6\" (UID: \"20fbef6e-603f-424d-9bb3-46d36becb585\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-q77m6" Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.876882 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/20fbef6e-603f-424d-9bb3-46d36becb585-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-q77m6\" (UID: \"20fbef6e-603f-424d-9bb3-46d36becb585\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-q77m6" Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.876931 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/20fbef6e-603f-424d-9bb3-46d36becb585-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-q77m6\" (UID: \"20fbef6e-603f-424d-9bb3-46d36becb585\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-q77m6" Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.876989 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/20fbef6e-603f-424d-9bb3-46d36becb585-service-ca\") pod \"cluster-version-operator-5c965bbfc6-q77m6\" (UID: \"20fbef6e-603f-424d-9bb3-46d36becb585\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-q77m6" Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.877023 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/20fbef6e-603f-424d-9bb3-46d36becb585-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-q77m6\" (UID: \"20fbef6e-603f-424d-9bb3-46d36becb585\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-q77m6" Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.887738 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.887785 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 12:08:14 crc kubenswrapper[4753]: E0129 12:08:14.887930 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 12:08:14 crc kubenswrapper[4753]: E0129 12:08:14.888064 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.978392 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/20fbef6e-603f-424d-9bb3-46d36becb585-service-ca\") pod \"cluster-version-operator-5c965bbfc6-q77m6\" (UID: \"20fbef6e-603f-424d-9bb3-46d36becb585\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-q77m6" Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.978442 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/20fbef6e-603f-424d-9bb3-46d36becb585-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-q77m6\" (UID: \"20fbef6e-603f-424d-9bb3-46d36becb585\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-q77m6" Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.978482 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/20fbef6e-603f-424d-9bb3-46d36becb585-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-q77m6\" (UID: \"20fbef6e-603f-424d-9bb3-46d36becb585\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-q77m6" Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.978502 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/20fbef6e-603f-424d-9bb3-46d36becb585-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-q77m6\" (UID: \"20fbef6e-603f-424d-9bb3-46d36becb585\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-q77m6" Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.978527 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/20fbef6e-603f-424d-9bb3-46d36becb585-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-q77m6\" (UID: \"20fbef6e-603f-424d-9bb3-46d36becb585\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-q77m6" Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.978594 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/20fbef6e-603f-424d-9bb3-46d36becb585-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-q77m6\" (UID: 
\"20fbef6e-603f-424d-9bb3-46d36becb585\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-q77m6" Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.978988 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/20fbef6e-603f-424d-9bb3-46d36becb585-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-q77m6\" (UID: \"20fbef6e-603f-424d-9bb3-46d36becb585\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-q77m6" Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.979331 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/20fbef6e-603f-424d-9bb3-46d36becb585-service-ca\") pod \"cluster-version-operator-5c965bbfc6-q77m6\" (UID: \"20fbef6e-603f-424d-9bb3-46d36becb585\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-q77m6" Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.993813 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/20fbef6e-603f-424d-9bb3-46d36becb585-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-q77m6\" (UID: \"20fbef6e-603f-424d-9bb3-46d36becb585\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-q77m6" Jan 29 12:08:14 crc kubenswrapper[4753]: I0129 12:08:14.998249 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/20fbef6e-603f-424d-9bb3-46d36becb585-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-q77m6\" (UID: \"20fbef6e-603f-424d-9bb3-46d36becb585\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-q77m6" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.118105 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-q77m6" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.407071 4753 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.407332 4753 kubelet_node_status.go:538] "Fast updating node status as it just became ready" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.458380 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-s4nq8"] Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.459017 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-s4nq8" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.462302 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-dx8db"] Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.476748 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-nrt2l"] Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.477324 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dx8db" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.477617 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.477675 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-nrt2l" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.477937 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.477934 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.478653 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.478774 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.479125 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.481002 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.481144 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.481708 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.484961 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.489096 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-86kqx"] Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.500489 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-p7d5d"] Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.500865 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-g5td2"] Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.501279 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-86kqx" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.501736 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-5wmqw"] Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.501837 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.502077 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.502165 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.502211 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.502315 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-5wmqw" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.502367 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.502735 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-p7d5d" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.502960 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.503056 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.503086 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-f6z5s"] Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.503127 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.503064 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-g5td2" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.503204 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.504187 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-f6z5s" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.502367 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.504802 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.504879 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-zdhjn"] Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.505029 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.505276 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.505316 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-qkqfh"] Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.505597 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-qkqfh" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.506551 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-ltcjq"] Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.506919 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ltcjq" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.513713 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.514110 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.519376 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.520255 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.520385 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.520473 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.520514 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.520784 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.520893 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-znsp6"] Jan 29 12:08:15 crc 
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.521652 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-znsp6"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.521771 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-255vg"]
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.522539 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-255vg"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.523735 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-w5dnh"]
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.524335 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-w5dnh"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.528053 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.528592 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.528800 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.528858 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.528800 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.529037 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.529153 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-mql86"]
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.534444 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-rwlht"]
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.534894 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-rwlht"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.535592 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-mql86"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.538096 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.538364 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.539186 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.541018 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-w6vx6"]
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.541791 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.541942 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.542143 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.542185 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.542295 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.542425 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.542569 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.542761 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.542913 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.543141 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.543303 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.543435 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.543561 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.553871 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.554211 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.554399 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.560625 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.561028 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.561065 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.561207 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.561297 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.561507 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.562142 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.562553 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.562791 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.563867 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.567839 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.570872 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-b2rb9"]
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.571039 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.571663 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.572286 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-b2rb9"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.572736 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.574768 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.574855 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.575092 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.575285 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.575848 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.576831 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.579394 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.579546 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.579674 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.579806 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.579913 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.581053 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.581356 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.582650 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.583113 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.583189 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.583273 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.586901 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.603491 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.606536 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-dx8db"]
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.607441 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.607583 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.607891 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/4b74c52f-102d-45ae-a789-0c43429e8aa0-available-featuregates\") pod \"openshift-config-operator-7777fb866f-f6z5s\" (UID: \"4b74c52f-102d-45ae-a789-0c43429e8aa0\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-f6z5s"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.607950 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/ddba54c6-0a85-4055-bc93-89e961961cd7-machine-approver-tls\") pod \"machine-approver-56656f9798-ltcjq\" (UID: \"ddba54c6-0a85-4055-bc93-89e961961cd7\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ltcjq"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.607975 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gthjf\" (UniqueName: \"kubernetes.io/projected/887a7f51-26e3-4d7e-99ff-884f7389171a-kube-api-access-gthjf\") pod \"cluster-image-registry-operator-dc59b4c8b-w5dnh\" (UID: \"887a7f51-26e3-4d7e-99ff-884f7389171a\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-w5dnh"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.607998 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/43f6f4f5-8e79-4142-95ee-84b051b27cf3-metrics-tls\") pod \"dns-operator-744455d44c-255vg\" (UID: \"43f6f4f5-8e79-4142-95ee-84b051b27cf3\") " pod="openshift-dns-operator/dns-operator-744455d44c-255vg"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.608020 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/86659644-dec3-4e1a-ba32-5b4487a2f4c8-etcd-client\") pod \"apiserver-76f77b778f-nrt2l\" (UID: \"86659644-dec3-4e1a-ba32-5b4487a2f4c8\") " pod="openshift-apiserver/apiserver-76f77b778f-nrt2l"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.608142 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.608519 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b3a97911-76f6-4098-91b6-a67368bcac8e-serving-cert\") pod \"authentication-operator-69f744f599-5wmqw\" (UID: \"b3a97911-76f6-4098-91b6-a67368bcac8e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5wmqw"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.608728 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4b79l\" (UniqueName: \"kubernetes.io/projected/4b74c52f-102d-45ae-a789-0c43429e8aa0-kube-api-access-4b79l\") pod \"openshift-config-operator-7777fb866f-f6z5s\" (UID: \"4b74c52f-102d-45ae-a789-0c43429e8aa0\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-f6z5s"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.608754 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/86659644-dec3-4e1a-ba32-5b4487a2f4c8-trusted-ca-bundle\") pod \"apiserver-76f77b778f-nrt2l\" (UID: \"86659644-dec3-4e1a-ba32-5b4487a2f4c8\") " pod="openshift-apiserver/apiserver-76f77b778f-nrt2l"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.608786 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/86659644-dec3-4e1a-ba32-5b4487a2f4c8-audit-dir\") pod \"apiserver-76f77b778f-nrt2l\" (UID: \"86659644-dec3-4e1a-ba32-5b4487a2f4c8\") " pod="openshift-apiserver/apiserver-76f77b778f-nrt2l"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.608809 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rxxtp\" (UniqueName: \"kubernetes.io/projected/b3a97911-76f6-4098-91b6-a67368bcac8e-kube-api-access-rxxtp\") pod \"authentication-operator-69f744f599-5wmqw\" (UID: \"b3a97911-76f6-4098-91b6-a67368bcac8e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5wmqw"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.608825 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/887a7f51-26e3-4d7e-99ff-884f7389171a-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-w5dnh\" (UID: \"887a7f51-26e3-4d7e-99ff-884f7389171a\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-w5dnh"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.608841 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b3a97911-76f6-4098-91b6-a67368bcac8e-service-ca-bundle\") pod \"authentication-operator-69f744f599-5wmqw\" (UID: \"b3a97911-76f6-4098-91b6-a67368bcac8e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5wmqw"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.608866 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d9f7z\" (UniqueName: \"kubernetes.io/projected/86659644-dec3-4e1a-ba32-5b4487a2f4c8-kube-api-access-d9f7z\") pod \"apiserver-76f77b778f-nrt2l\" (UID: \"86659644-dec3-4e1a-ba32-5b4487a2f4c8\") " pod="openshift-apiserver/apiserver-76f77b778f-nrt2l"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.608890 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4b74c52f-102d-45ae-a789-0c43429e8aa0-serving-cert\") pod \"openshift-config-operator-7777fb866f-f6z5s\" (UID: \"4b74c52f-102d-45ae-a789-0c43429e8aa0\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-f6z5s"
\"openshift-config-operator-7777fb866f-f6z5s\" (UID: \"4b74c52f-102d-45ae-a789-0c43429e8aa0\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-f6z5s" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.608908 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/86659644-dec3-4e1a-ba32-5b4487a2f4c8-audit\") pod \"apiserver-76f77b778f-nrt2l\" (UID: \"86659644-dec3-4e1a-ba32-5b4487a2f4c8\") " pod="openshift-apiserver/apiserver-76f77b778f-nrt2l" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.608934 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tx6sh\" (UniqueName: \"kubernetes.io/projected/ddba54c6-0a85-4055-bc93-89e961961cd7-kube-api-access-tx6sh\") pod \"machine-approver-56656f9798-ltcjq\" (UID: \"ddba54c6-0a85-4055-bc93-89e961961cd7\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ltcjq" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.608956 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/ddba54c6-0a85-4055-bc93-89e961961cd7-auth-proxy-config\") pod \"machine-approver-56656f9798-ltcjq\" (UID: \"ddba54c6-0a85-4055-bc93-89e961961cd7\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ltcjq" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.608972 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/86659644-dec3-4e1a-ba32-5b4487a2f4c8-image-import-ca\") pod \"apiserver-76f77b778f-nrt2l\" (UID: \"86659644-dec3-4e1a-ba32-5b4487a2f4c8\") " pod="openshift-apiserver/apiserver-76f77b778f-nrt2l" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.608991 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ddba54c6-0a85-4055-bc93-89e961961cd7-config\") pod \"machine-approver-56656f9798-ltcjq\" (UID: \"ddba54c6-0a85-4055-bc93-89e961961cd7\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ltcjq" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.609007 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/86659644-dec3-4e1a-ba32-5b4487a2f4c8-etcd-serving-ca\") pod \"apiserver-76f77b778f-nrt2l\" (UID: \"86659644-dec3-4e1a-ba32-5b4487a2f4c8\") " pod="openshift-apiserver/apiserver-76f77b778f-nrt2l" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.609220 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/887a7f51-26e3-4d7e-99ff-884f7389171a-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-w5dnh\" (UID: \"887a7f51-26e3-4d7e-99ff-884f7389171a\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-w5dnh" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.609258 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/86659644-dec3-4e1a-ba32-5b4487a2f4c8-config\") pod \"apiserver-76f77b778f-nrt2l\" (UID: \"86659644-dec3-4e1a-ba32-5b4487a2f4c8\") " 
pod="openshift-apiserver/apiserver-76f77b778f-nrt2l" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.609275 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/86659644-dec3-4e1a-ba32-5b4487a2f4c8-serving-cert\") pod \"apiserver-76f77b778f-nrt2l\" (UID: \"86659644-dec3-4e1a-ba32-5b4487a2f4c8\") " pod="openshift-apiserver/apiserver-76f77b778f-nrt2l" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.609304 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/86659644-dec3-4e1a-ba32-5b4487a2f4c8-node-pullsecrets\") pod \"apiserver-76f77b778f-nrt2l\" (UID: \"86659644-dec3-4e1a-ba32-5b4487a2f4c8\") " pod="openshift-apiserver/apiserver-76f77b778f-nrt2l" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.609318 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/86659644-dec3-4e1a-ba32-5b4487a2f4c8-encryption-config\") pod \"apiserver-76f77b778f-nrt2l\" (UID: \"86659644-dec3-4e1a-ba32-5b4487a2f4c8\") " pod="openshift-apiserver/apiserver-76f77b778f-nrt2l" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.609333 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/887a7f51-26e3-4d7e-99ff-884f7389171a-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-w5dnh\" (UID: \"887a7f51-26e3-4d7e-99ff-884f7389171a\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-w5dnh" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.609351 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b3a97911-76f6-4098-91b6-a67368bcac8e-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-5wmqw\" (UID: \"b3a97911-76f6-4098-91b6-a67368bcac8e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5wmqw" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.609368 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jg4jj\" (UniqueName: \"kubernetes.io/projected/43f6f4f5-8e79-4142-95ee-84b051b27cf3-kube-api-access-jg4jj\") pod \"dns-operator-744455d44c-255vg\" (UID: \"43f6f4f5-8e79-4142-95ee-84b051b27cf3\") " pod="openshift-dns-operator/dns-operator-744455d44c-255vg" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.609441 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b3a97911-76f6-4098-91b6-a67368bcac8e-config\") pod \"authentication-operator-69f744f599-5wmqw\" (UID: \"b3a97911-76f6-4098-91b6-a67368bcac8e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5wmqw" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.609876 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.610357 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.610450 4753 
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.610599 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.610734 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.611328 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.615691 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-4bpqt"]
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.616908 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4bpqt"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.620825 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.643901 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.645138 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.646887 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-gkf8g"]
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.647667 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-jpxd9"]
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.648236 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-jpxd9"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.648254 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-gkf8g"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.649845 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.651010 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-fp6bq"]
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.656461 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.656686 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.656785 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.658292 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.659772 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-s4nq8"]
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.659830 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-f6z5s"]
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.659854 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-znsp6"]
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.659870 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-zdhjn"]
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.660000 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-fp6bq"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.661496 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-lpjkw"]
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.662577 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-lpjkw"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.663966 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.667244 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-5wmqw"]
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.668467 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-nbdct"]
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.674381 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-nbdct"
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-nbdct" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.674471 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-16 04:52:37.984756541 +0000 UTC Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.674558 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Rotating certificates Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.677401 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.678671 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-6xpng"] Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.679969 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-6xpng" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.680463 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-p7d5d"] Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.683248 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.685827 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.690488 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-g5td2"] Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.691557 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-qkqfh"] Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.702691 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.926024 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/ddba54c6-0a85-4055-bc93-89e961961cd7-auth-proxy-config\") pod \"machine-approver-56656f9798-ltcjq\" (UID: \"ddba54c6-0a85-4055-bc93-89e961961cd7\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ltcjq" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.926273 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/86659644-dec3-4e1a-ba32-5b4487a2f4c8-image-import-ca\") pod \"apiserver-76f77b778f-nrt2l\" (UID: \"86659644-dec3-4e1a-ba32-5b4487a2f4c8\") " pod="openshift-apiserver/apiserver-76f77b778f-nrt2l" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.926316 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/86659644-dec3-4e1a-ba32-5b4487a2f4c8-etcd-serving-ca\") pod \"apiserver-76f77b778f-nrt2l\" (UID: \"86659644-dec3-4e1a-ba32-5b4487a2f4c8\") " pod="openshift-apiserver/apiserver-76f77b778f-nrt2l" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.926360 4753 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ddba54c6-0a85-4055-bc93-89e961961cd7-config\") pod \"machine-approver-56656f9798-ltcjq\" (UID: \"ddba54c6-0a85-4055-bc93-89e961961cd7\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ltcjq" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.926426 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/887a7f51-26e3-4d7e-99ff-884f7389171a-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-w5dnh\" (UID: \"887a7f51-26e3-4d7e-99ff-884f7389171a\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-w5dnh" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.926512 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/86659644-dec3-4e1a-ba32-5b4487a2f4c8-config\") pod \"apiserver-76f77b778f-nrt2l\" (UID: \"86659644-dec3-4e1a-ba32-5b4487a2f4c8\") " pod="openshift-apiserver/apiserver-76f77b778f-nrt2l" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.926565 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/86659644-dec3-4e1a-ba32-5b4487a2f4c8-serving-cert\") pod \"apiserver-76f77b778f-nrt2l\" (UID: \"86659644-dec3-4e1a-ba32-5b4487a2f4c8\") " pod="openshift-apiserver/apiserver-76f77b778f-nrt2l" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.926636 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/86659644-dec3-4e1a-ba32-5b4487a2f4c8-node-pullsecrets\") pod \"apiserver-76f77b778f-nrt2l\" (UID: \"86659644-dec3-4e1a-ba32-5b4487a2f4c8\") " pod="openshift-apiserver/apiserver-76f77b778f-nrt2l" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.926669 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/86659644-dec3-4e1a-ba32-5b4487a2f4c8-encryption-config\") pod \"apiserver-76f77b778f-nrt2l\" (UID: \"86659644-dec3-4e1a-ba32-5b4487a2f4c8\") " pod="openshift-apiserver/apiserver-76f77b778f-nrt2l" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.926741 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/887a7f51-26e3-4d7e-99ff-884f7389171a-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-w5dnh\" (UID: \"887a7f51-26e3-4d7e-99ff-884f7389171a\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-w5dnh" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.926774 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b3a97911-76f6-4098-91b6-a67368bcac8e-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-5wmqw\" (UID: \"b3a97911-76f6-4098-91b6-a67368bcac8e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5wmqw" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.926802 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jg4jj\" (UniqueName: \"kubernetes.io/projected/43f6f4f5-8e79-4142-95ee-84b051b27cf3-kube-api-access-jg4jj\") pod 
\"dns-operator-744455d44c-255vg\" (UID: \"43f6f4f5-8e79-4142-95ee-84b051b27cf3\") " pod="openshift-dns-operator/dns-operator-744455d44c-255vg" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.926858 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b3a97911-76f6-4098-91b6-a67368bcac8e-config\") pod \"authentication-operator-69f744f599-5wmqw\" (UID: \"b3a97911-76f6-4098-91b6-a67368bcac8e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5wmqw" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.926916 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/4b74c52f-102d-45ae-a789-0c43429e8aa0-available-featuregates\") pod \"openshift-config-operator-7777fb866f-f6z5s\" (UID: \"4b74c52f-102d-45ae-a789-0c43429e8aa0\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-f6z5s" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.926947 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/ddba54c6-0a85-4055-bc93-89e961961cd7-machine-approver-tls\") pod \"machine-approver-56656f9798-ltcjq\" (UID: \"ddba54c6-0a85-4055-bc93-89e961961cd7\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ltcjq" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.926979 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gthjf\" (UniqueName: \"kubernetes.io/projected/887a7f51-26e3-4d7e-99ff-884f7389171a-kube-api-access-gthjf\") pod \"cluster-image-registry-operator-dc59b4c8b-w5dnh\" (UID: \"887a7f51-26e3-4d7e-99ff-884f7389171a\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-w5dnh" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.927021 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/86659644-dec3-4e1a-ba32-5b4487a2f4c8-etcd-client\") pod \"apiserver-76f77b778f-nrt2l\" (UID: \"86659644-dec3-4e1a-ba32-5b4487a2f4c8\") " pod="openshift-apiserver/apiserver-76f77b778f-nrt2l" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.927054 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/43f6f4f5-8e79-4142-95ee-84b051b27cf3-metrics-tls\") pod \"dns-operator-744455d44c-255vg\" (UID: \"43f6f4f5-8e79-4142-95ee-84b051b27cf3\") " pod="openshift-dns-operator/dns-operator-744455d44c-255vg" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.927080 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/86659644-dec3-4e1a-ba32-5b4487a2f4c8-trusted-ca-bundle\") pod \"apiserver-76f77b778f-nrt2l\" (UID: \"86659644-dec3-4e1a-ba32-5b4487a2f4c8\") " pod="openshift-apiserver/apiserver-76f77b778f-nrt2l" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.927113 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b3a97911-76f6-4098-91b6-a67368bcac8e-serving-cert\") pod \"authentication-operator-69f744f599-5wmqw\" (UID: \"b3a97911-76f6-4098-91b6-a67368bcac8e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5wmqw" Jan 29 12:08:15 crc kubenswrapper[4753]: 
I0129 12:08:15.927139 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4b79l\" (UniqueName: \"kubernetes.io/projected/4b74c52f-102d-45ae-a789-0c43429e8aa0-kube-api-access-4b79l\") pod \"openshift-config-operator-7777fb866f-f6z5s\" (UID: \"4b74c52f-102d-45ae-a789-0c43429e8aa0\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-f6z5s" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.927239 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/86659644-dec3-4e1a-ba32-5b4487a2f4c8-audit-dir\") pod \"apiserver-76f77b778f-nrt2l\" (UID: \"86659644-dec3-4e1a-ba32-5b4487a2f4c8\") " pod="openshift-apiserver/apiserver-76f77b778f-nrt2l" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.927323 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/887a7f51-26e3-4d7e-99ff-884f7389171a-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-w5dnh\" (UID: \"887a7f51-26e3-4d7e-99ff-884f7389171a\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-w5dnh" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.927406 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rxxtp\" (UniqueName: \"kubernetes.io/projected/b3a97911-76f6-4098-91b6-a67368bcac8e-kube-api-access-rxxtp\") pod \"authentication-operator-69f744f599-5wmqw\" (UID: \"b3a97911-76f6-4098-91b6-a67368bcac8e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5wmqw" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.927482 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-p6m5g" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.927553 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b3a97911-76f6-4098-91b6-a67368bcac8e-service-ca-bundle\") pod \"authentication-operator-69f744f599-5wmqw\" (UID: \"b3a97911-76f6-4098-91b6-a67368bcac8e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5wmqw" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.927589 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d9f7z\" (UniqueName: \"kubernetes.io/projected/86659644-dec3-4e1a-ba32-5b4487a2f4c8-kube-api-access-d9f7z\") pod \"apiserver-76f77b778f-nrt2l\" (UID: \"86659644-dec3-4e1a-ba32-5b4487a2f4c8\") " pod="openshift-apiserver/apiserver-76f77b778f-nrt2l" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.927644 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4b74c52f-102d-45ae-a789-0c43429e8aa0-serving-cert\") pod \"openshift-config-operator-7777fb866f-f6z5s\" (UID: \"4b74c52f-102d-45ae-a789-0c43429e8aa0\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-f6z5s" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.927667 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/86659644-dec3-4e1a-ba32-5b4487a2f4c8-audit\") pod \"apiserver-76f77b778f-nrt2l\" (UID: \"86659644-dec3-4e1a-ba32-5b4487a2f4c8\") " pod="openshift-apiserver/apiserver-76f77b778f-nrt2l" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.927694 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tx6sh\" (UniqueName: \"kubernetes.io/projected/ddba54c6-0a85-4055-bc93-89e961961cd7-kube-api-access-tx6sh\") pod \"machine-approver-56656f9798-ltcjq\" (UID: \"ddba54c6-0a85-4055-bc93-89e961961cd7\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ltcjq" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.927777 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/ddba54c6-0a85-4055-bc93-89e961961cd7-auth-proxy-config\") pod \"machine-approver-56656f9798-ltcjq\" (UID: \"ddba54c6-0a85-4055-bc93-89e961961cd7\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ltcjq" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.929662 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.943004 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/86659644-dec3-4e1a-ba32-5b4487a2f4c8-node-pullsecrets\") pod \"apiserver-76f77b778f-nrt2l\" (UID: \"86659644-dec3-4e1a-ba32-5b4487a2f4c8\") " pod="openshift-apiserver/apiserver-76f77b778f-nrt2l" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.943948 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/86659644-dec3-4e1a-ba32-5b4487a2f4c8-image-import-ca\") pod \"apiserver-76f77b778f-nrt2l\" (UID: \"86659644-dec3-4e1a-ba32-5b4487a2f4c8\") " pod="openshift-apiserver/apiserver-76f77b778f-nrt2l" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.944882 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b3a97911-76f6-4098-91b6-a67368bcac8e-config\") pod \"authentication-operator-69f744f599-5wmqw\" (UID: \"b3a97911-76f6-4098-91b6-a67368bcac8e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5wmqw" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.944932 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/86659644-dec3-4e1a-ba32-5b4487a2f4c8-audit-dir\") pod \"apiserver-76f77b778f-nrt2l\" (UID: \"86659644-dec3-4e1a-ba32-5b4487a2f4c8\") " pod="openshift-apiserver/apiserver-76f77b778f-nrt2l" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.945199 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/86659644-dec3-4e1a-ba32-5b4487a2f4c8-audit\") pod \"apiserver-76f77b778f-nrt2l\" (UID: \"86659644-dec3-4e1a-ba32-5b4487a2f4c8\") " pod="openshift-apiserver/apiserver-76f77b778f-nrt2l" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.946032 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ddba54c6-0a85-4055-bc93-89e961961cd7-config\") pod \"machine-approver-56656f9798-ltcjq\" (UID: \"ddba54c6-0a85-4055-bc93-89e961961cd7\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ltcjq" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.946795 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/4b74c52f-102d-45ae-a789-0c43429e8aa0-available-featuregates\") pod \"openshift-config-operator-7777fb866f-f6z5s\" (UID: \"4b74c52f-102d-45ae-a789-0c43429e8aa0\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-f6z5s" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.947450 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/86659644-dec3-4e1a-ba32-5b4487a2f4c8-etcd-serving-ca\") pod \"apiserver-76f77b778f-nrt2l\" (UID: \"86659644-dec3-4e1a-ba32-5b4487a2f4c8\") " pod="openshift-apiserver/apiserver-76f77b778f-nrt2l" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.948809 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b3a97911-76f6-4098-91b6-a67368bcac8e-service-ca-bundle\") pod 
\"authentication-operator-69f744f599-5wmqw\" (UID: \"b3a97911-76f6-4098-91b6-a67368bcac8e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5wmqw" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.949480 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/86659644-dec3-4e1a-ba32-5b4487a2f4c8-config\") pod \"apiserver-76f77b778f-nrt2l\" (UID: \"86659644-dec3-4e1a-ba32-5b4487a2f4c8\") " pod="openshift-apiserver/apiserver-76f77b778f-nrt2l" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.956337 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.956575 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.956618 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.956982 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.957026 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.957413 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.958063 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/86659644-dec3-4e1a-ba32-5b4487a2f4c8-trusted-ca-bundle\") pod \"apiserver-76f77b778f-nrt2l\" (UID: \"86659644-dec3-4e1a-ba32-5b4487a2f4c8\") " pod="openshift-apiserver/apiserver-76f77b778f-nrt2l" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.958618 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/887a7f51-26e3-4d7e-99ff-884f7389171a-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-w5dnh\" (UID: \"887a7f51-26e3-4d7e-99ff-884f7389171a\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-w5dnh" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.959066 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b3a97911-76f6-4098-91b6-a67368bcac8e-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-5wmqw\" (UID: \"b3a97911-76f6-4098-91b6-a67368bcac8e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5wmqw" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.959274 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/86659644-dec3-4e1a-ba32-5b4487a2f4c8-etcd-client\") pod \"apiserver-76f77b778f-nrt2l\" (UID: \"86659644-dec3-4e1a-ba32-5b4487a2f4c8\") " pod="openshift-apiserver/apiserver-76f77b778f-nrt2l" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.959559 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/ddba54c6-0a85-4055-bc93-89e961961cd7-machine-approver-tls\") pod 
\"machine-approver-56656f9798-ltcjq\" (UID: \"ddba54c6-0a85-4055-bc93-89e961961cd7\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ltcjq" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.959669 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/887a7f51-26e3-4d7e-99ff-884f7389171a-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-w5dnh\" (UID: \"887a7f51-26e3-4d7e-99ff-884f7389171a\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-w5dnh" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.959805 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/86659644-dec3-4e1a-ba32-5b4487a2f4c8-serving-cert\") pod \"apiserver-76f77b778f-nrt2l\" (UID: \"86659644-dec3-4e1a-ba32-5b4487a2f4c8\") " pod="openshift-apiserver/apiserver-76f77b778f-nrt2l" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.959870 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/43f6f4f5-8e79-4142-95ee-84b051b27cf3-metrics-tls\") pod \"dns-operator-744455d44c-255vg\" (UID: \"43f6f4f5-8e79-4142-95ee-84b051b27cf3\") " pod="openshift-dns-operator/dns-operator-744455d44c-255vg" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.960027 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.960155 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4b74c52f-102d-45ae-a789-0c43429e8aa0-serving-cert\") pod \"openshift-config-operator-7777fb866f-f6z5s\" (UID: \"4b74c52f-102d-45ae-a789-0c43429e8aa0\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-f6z5s" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.960173 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/86659644-dec3-4e1a-ba32-5b4487a2f4c8-encryption-config\") pod \"apiserver-76f77b778f-nrt2l\" (UID: \"86659644-dec3-4e1a-ba32-5b4487a2f4c8\") " pod="openshift-apiserver/apiserver-76f77b778f-nrt2l" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.960305 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b3a97911-76f6-4098-91b6-a67368bcac8e-serving-cert\") pod \"authentication-operator-69f744f599-5wmqw\" (UID: \"b3a97911-76f6-4098-91b6-a67368bcac8e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5wmqw" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.960314 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.960347 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.960665 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-nrt2l"] Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.960739 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-bmd6j"] Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.962009 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-rwlht"]
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.962066 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-8kz2s"]
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.962247 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-bmd6j"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.962613 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-8dvz9"]
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.962795 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-8kz2s"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.963194 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-8dvz9"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.963497 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-6b2dk"]
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.964019 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.964488 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6b2dk"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.965252 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-kv2r9"]
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.965967 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-kv2r9"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.967762 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-j4xlb"]
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.967924 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.969236 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-779px"]
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.970278 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-j4xlb"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.970467 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v88cc"]
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.970730 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-779px"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.971651 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v88cc"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.973403 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-zm8vw"]
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.974531 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-79sb7"]
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.974674 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-zm8vw"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.975914 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-mql86"]
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.976006 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-79sb7"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.976319 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-gln66"]
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.977093 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-gln66"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.978429 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-lswml"]
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.981922 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-26486"]
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.982567 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-fp6bq"]
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.982593 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-4bpqt"]
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.982687 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-6xpng"]
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.982695 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-26486"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.982788 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-lswml"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.985843 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.989038 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-86kqx"]
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.990848 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-b2rb9"]
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.992897 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-nbdct"]
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.995378 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494800-86kw2"]
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.996344 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494800-86kw2"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.997821 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-2kbdk"]
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.998394 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2kbdk"
Jan 29 12:08:15 crc kubenswrapper[4753]: I0129 12:08:15.999558 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-gkf8g"]
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.001453 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.001594 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-gln66"]
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.003740 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-6b2dk"]
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.004914 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-8kz2s"]
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.007683 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-w6vx6"]
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.008933 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-zm8vw"]
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.010275 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-w5dnh"]
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.011701 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-bmd6j"]
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.013066 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-kv2r9"]
pods=["openshift-service-ca-operator/service-ca-operator-777779d784-kv2r9"] Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.014369 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-jrblr"] Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.018606 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-jrblr" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.019141 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v88cc"] Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.020994 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-q77m6" event={"ID":"20fbef6e-603f-424d-9bb3-46d36becb585","Type":"ContainerStarted","Data":"9577c231490e4d059d9f230382e0b55f509831ca9f46724d1d03bc0ed878d0ff"} Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.021052 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-q77m6" event={"ID":"20fbef6e-603f-424d-9bb3-46d36becb585","Type":"ContainerStarted","Data":"1a0cbdcad1a3242d47a9a45214af5ad3e6a1b80529bf27f3fdf34ffc6f67e391"} Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.021736 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.023125 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-lpjkw"] Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.025689 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-79sb7"] Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.032717 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-779px"] Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.034566 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-gntl4"] Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.036020 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-jrblr"] Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.036147 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-gntl4" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.037096 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-8dvz9"] Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.038146 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-lswml"] Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.040034 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-j4xlb"] Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.041632 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.041856 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-2kbdk"] Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.043285 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-255vg"] Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.044287 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494800-86kw2"] Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.045567 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-gntl4"] Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.062242 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.082375 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.104180 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.122209 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.142321 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.162349 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.184330 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.201457 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.222207 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.243129 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 
12:08:16.262267 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.283613 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.302885 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.321850 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.343834 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.363341 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.588462 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.589739 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.590198 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.590416 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.590424 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.590698 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.590866 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.590933 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.590979 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.591086 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.621099 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tx6sh\" (UniqueName: \"kubernetes.io/projected/ddba54c6-0a85-4055-bc93-89e961961cd7-kube-api-access-tx6sh\") pod \"machine-approver-56656f9798-ltcjq\" (UID: \"ddba54c6-0a85-4055-bc93-89e961961cd7\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ltcjq" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.638316 4753 
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.664006 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d9f7z\" (UniqueName: \"kubernetes.io/projected/86659644-dec3-4e1a-ba32-5b4487a2f4c8-kube-api-access-d9f7z\") pod \"apiserver-76f77b778f-nrt2l\" (UID: \"86659644-dec3-4e1a-ba32-5b4487a2f4c8\") " pod="openshift-apiserver/apiserver-76f77b778f-nrt2l"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.689088 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jg4jj\" (UniqueName: \"kubernetes.io/projected/43f6f4f5-8e79-4142-95ee-84b051b27cf3-kube-api-access-jg4jj\") pod \"dns-operator-744455d44c-255vg\" (UID: \"43f6f4f5-8e79-4142-95ee-84b051b27cf3\") " pod="openshift-dns-operator/dns-operator-744455d44c-255vg"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.703967 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4b79l\" (UniqueName: \"kubernetes.io/projected/4b74c52f-102d-45ae-a789-0c43429e8aa0-kube-api-access-4b79l\") pod \"openshift-config-operator-7777fb866f-f6z5s\" (UID: \"4b74c52f-102d-45ae-a789-0c43429e8aa0\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-f6z5s"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.718748 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/887a7f51-26e3-4d7e-99ff-884f7389171a-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-w5dnh\" (UID: \"887a7f51-26e3-4d7e-99ff-884f7389171a\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-w5dnh"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.739508 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rxxtp\" (UniqueName: \"kubernetes.io/projected/b3a97911-76f6-4098-91b6-a67368bcac8e-kube-api-access-rxxtp\") pod \"authentication-operator-69f744f599-5wmqw\" (UID: \"b3a97911-76f6-4098-91b6-a67368bcac8e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5wmqw"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.779806 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-nrt2l"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.783142 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.802947 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.824167 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.841043 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-5wmqw"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.841412 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/df61f830-b312-4a01-8d17-057799312936-trusted-ca\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.841741 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/6912f3bb-d8bc-4dc1-b3f0-9e7e1e8d354a-etcd-client\") pod \"apiserver-7bbb656c7d-p7d5d\" (UID: \"6912f3bb-d8bc-4dc1-b3f0-9e7e1e8d354a\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-p7d5d"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.841780 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/91845c42-ac54-4831-b8ac-73737902b703-console-oauth-config\") pod \"console-f9d7485db-mql86\" (UID: \"91845c42-ac54-4831-b8ac-73737902b703\") " pod="openshift-console/console-f9d7485db-mql86"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.841799 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/91845c42-ac54-4831-b8ac-73737902b703-service-ca\") pod \"console-f9d7485db-mql86\" (UID: \"91845c42-ac54-4831-b8ac-73737902b703\") " pod="openshift-console/console-f9d7485db-mql86"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.841815 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/f9d4b340-8ceb-41e1-8289-f48222e5d3d8-trusted-ca\") pod \"console-operator-58897d9998-znsp6\" (UID: \"f9d4b340-8ceb-41e1-8289-f48222e5d3d8\") " pod="openshift-console-operator/console-operator-58897d9998-znsp6"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.841846 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f9d4b340-8ceb-41e1-8289-f48222e5d3d8-serving-cert\") pod \"console-operator-58897d9998-znsp6\" (UID: \"f9d4b340-8ceb-41e1-8289-f48222e5d3d8\") " pod="openshift-console-operator/console-operator-58897d9998-znsp6"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.841862 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/9728fd7e-6203-4082-9297-2d3fd9e17b74-audit-policies\") pod \"oauth-openshift-558db77b4-zdhjn\" (UID: \"9728fd7e-6203-4082-9297-2d3fd9e17b74\") " pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.841883 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e0c82664-4c0e-4f03-858f-dcf68539f92a-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-qkqfh\" (UID: \"e0c82664-4c0e-4f03-858f-dcf68539f92a\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-qkqfh"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.841902 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3628b07e-d247-4487-8095-821bf56656b8-config\") pod \"controller-manager-879f6c89f-86kqx\" (UID: \"3628b07e-d247-4487-8095-821bf56656b8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-86kqx"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.841976 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/df61f830-b312-4a01-8d17-057799312936-registry-tls\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.842013 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/2d66d4a7-6db6-438c-9d2b-cb2bb0ef846b-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-s4nq8\" (UID: \"2d66d4a7-6db6-438c-9d2b-cb2bb0ef846b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-s4nq8"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.842032 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-zdhjn\" (UID: \"9728fd7e-6203-4082-9297-2d3fd9e17b74\") " pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.842049 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e1a92560-535a-4f2c-9ce6-9940bfc1bf37-config\") pod \"openshift-apiserver-operator-796bbdcf4f-rwlht\" (UID: \"e1a92560-535a-4f2c-9ce6-9940bfc1bf37\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-rwlht"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.842080 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/df61f830-b312-4a01-8d17-057799312936-installation-pull-secrets\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.842127 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.842173 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/6912f3bb-d8bc-4dc1-b3f0-9e7e1e8d354a-audit-policies\") pod \"apiserver-7bbb656c7d-p7d5d\" (UID: \"6912f3bb-d8bc-4dc1-b3f0-9e7e1e8d354a\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-p7d5d"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.842213 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-zdhjn\" (UID: \"9728fd7e-6203-4082-9297-2d3fd9e17b74\") " pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.842259 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e1a92560-535a-4f2c-9ce6-9940bfc1bf37-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-rwlht\" (UID: \"e1a92560-535a-4f2c-9ce6-9940bfc1bf37\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-rwlht"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.842278 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kpjl9\" (UniqueName: \"kubernetes.io/projected/91845c42-ac54-4831-b8ac-73737902b703-kube-api-access-kpjl9\") pod \"console-f9d7485db-mql86\" (UID: \"91845c42-ac54-4831-b8ac-73737902b703\") " pod="openshift-console/console-f9d7485db-mql86"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.842311 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/9728fd7e-6203-4082-9297-2d3fd9e17b74-audit-dir\") pod \"oauth-openshift-558db77b4-zdhjn\" (UID: \"9728fd7e-6203-4082-9297-2d3fd9e17b74\") " pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.842348 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e0c82664-4c0e-4f03-858f-dcf68539f92a-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-qkqfh\" (UID: \"e0c82664-4c0e-4f03-858f-dcf68539f92a\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-qkqfh"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.842363 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/91845c42-ac54-4831-b8ac-73737902b703-console-config\") pod \"console-f9d7485db-mql86\" (UID: \"91845c42-ac54-4831-b8ac-73737902b703\") " pod="openshift-console/console-f9d7485db-mql86"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.842381 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/2d66d4a7-6db6-438c-9d2b-cb2bb0ef846b-images\") pod \"machine-api-operator-5694c8668f-s4nq8\" (UID: \"2d66d4a7-6db6-438c-9d2b-cb2bb0ef846b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-s4nq8"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.842419 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-47qr2\" (UniqueName: \"kubernetes.io/projected/e1a92560-535a-4f2c-9ce6-9940bfc1bf37-kube-api-access-47qr2\") pod \"openshift-apiserver-operator-796bbdcf4f-rwlht\" (UID: \"e1a92560-535a-4f2c-9ce6-9940bfc1bf37\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-rwlht"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.842437 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/91845c42-ac54-4831-b8ac-73737902b703-console-serving-cert\") pod \"console-f9d7485db-mql86\" (UID: \"91845c42-ac54-4831-b8ac-73737902b703\") " pod="openshift-console/console-f9d7485db-mql86"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.842465 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q4hkh\" (UniqueName: \"kubernetes.io/projected/6a22bc59-6a04-49b4-aa79-225e9571ee71-kube-api-access-q4hkh\") pod \"route-controller-manager-6576b87f9c-dx8db\" (UID: \"6a22bc59-6a04-49b4-aa79-225e9571ee71\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dx8db"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.842499 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/6912f3bb-d8bc-4dc1-b3f0-9e7e1e8d354a-audit-dir\") pod \"apiserver-7bbb656c7d-p7d5d\" (UID: \"6912f3bb-d8bc-4dc1-b3f0-9e7e1e8d354a\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-p7d5d"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.842533 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-zdhjn\" (UID: \"9728fd7e-6203-4082-9297-2d3fd9e17b74\") " pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.842559 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2d66d4a7-6db6-438c-9d2b-cb2bb0ef846b-config\") pod \"machine-api-operator-5694c8668f-s4nq8\" (UID: \"2d66d4a7-6db6-438c-9d2b-cb2bb0ef846b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-s4nq8"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.842598 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/3519d984-8bed-45ca-9e6d-687bec69ee24-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-g5td2\" (UID: \"3519d984-8bed-45ca-9e6d-687bec69ee24\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-g5td2"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.842622 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6912f3bb-d8bc-4dc1-b3f0-9e7e1e8d354a-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-p7d5d\" (UID: \"6912f3bb-d8bc-4dc1-b3f0-9e7e1e8d354a\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-p7d5d"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.842659 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/91845c42-ac54-4831-b8ac-73737902b703-oauth-serving-cert\") pod \"console-f9d7485db-mql86\" (UID: \"91845c42-ac54-4831-b8ac-73737902b703\") " pod="openshift-console/console-f9d7485db-mql86"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.842687 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6a22bc59-6a04-49b4-aa79-225e9571ee71-serving-cert\") pod \"route-controller-manager-6576b87f9c-dx8db\" (UID: \"6a22bc59-6a04-49b4-aa79-225e9571ee71\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dx8db"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.842722 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-zdhjn\" (UID: \"9728fd7e-6203-4082-9297-2d3fd9e17b74\") " pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.842750 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/91845c42-ac54-4831-b8ac-73737902b703-trusted-ca-bundle\") pod \"console-f9d7485db-mql86\" (UID: \"91845c42-ac54-4831-b8ac-73737902b703\") " pod="openshift-console/console-f9d7485db-mql86"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.842785 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q4nmh\" (UniqueName: \"kubernetes.io/projected/3628b07e-d247-4487-8095-821bf56656b8-kube-api-access-q4nmh\") pod \"controller-manager-879f6c89f-86kqx\" (UID: \"3628b07e-d247-4487-8095-821bf56656b8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-86kqx"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.842831 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6912f3bb-d8bc-4dc1-b3f0-9e7e1e8d354a-serving-cert\") pod \"apiserver-7bbb656c7d-p7d5d\" (UID: \"6912f3bb-d8bc-4dc1-b3f0-9e7e1e8d354a\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-p7d5d"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.842857 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/df61f830-b312-4a01-8d17-057799312936-bound-sa-token\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:16 crc kubenswrapper[4753]: E0129 12:08:16.842892 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:17.342862532 +0000 UTC m=+111.594944037 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.842951 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dds5m\" (UniqueName: \"kubernetes.io/projected/2d66d4a7-6db6-438c-9d2b-cb2bb0ef846b-kube-api-access-dds5m\") pod \"machine-api-operator-5694c8668f-s4nq8\" (UID: \"2d66d4a7-6db6-438c-9d2b-cb2bb0ef846b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-s4nq8"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.843574 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sk4v8\" (UniqueName: \"kubernetes.io/projected/e0c82664-4c0e-4f03-858f-dcf68539f92a-kube-api-access-sk4v8\") pod \"openshift-controller-manager-operator-756b6f6bc6-qkqfh\" (UID: \"e0c82664-4c0e-4f03-858f-dcf68539f92a\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-qkqfh"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.843624 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/3628b07e-d247-4487-8095-821bf56656b8-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-86kqx\" (UID: \"3628b07e-d247-4487-8095-821bf56656b8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-86kqx"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.843676 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-zdhjn\" (UID: \"9728fd7e-6203-4082-9297-2d3fd9e17b74\") " pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.843736 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-zdhjn\" (UID: \"9728fd7e-6203-4082-9297-2d3fd9e17b74\") " pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.843784 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-766r8\" (UniqueName: \"kubernetes.io/projected/df61f830-b312-4a01-8d17-057799312936-kube-api-access-766r8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.843816 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6a22bc59-6a04-49b4-aa79-225e9571ee71-client-ca\") pod \"route-controller-manager-6576b87f9c-dx8db\" (UID: \"6a22bc59-6a04-49b4-aa79-225e9571ee71\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dx8db"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.843839 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5mwvb\" (UniqueName: \"kubernetes.io/projected/6912f3bb-d8bc-4dc1-b3f0-9e7e1e8d354a-kube-api-access-5mwvb\") pod \"apiserver-7bbb656c7d-p7d5d\" (UID: \"6912f3bb-d8bc-4dc1-b3f0-9e7e1e8d354a\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-p7d5d"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.844014 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-zdhjn\" (UID: \"9728fd7e-6203-4082-9297-2d3fd9e17b74\") " pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.844062 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a22bc59-6a04-49b4-aa79-225e9571ee71-config\") pod \"route-controller-manager-6576b87f9c-dx8db\" (UID: \"6a22bc59-6a04-49b4-aa79-225e9571ee71\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dx8db"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.844092 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/df61f830-b312-4a01-8d17-057799312936-ca-trust-extracted\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.844121 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-zdhjn\" (UID: \"9728fd7e-6203-4082-9297-2d3fd9e17b74\") " pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.844146 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9fdsb\" (UniqueName: \"kubernetes.io/projected/9728fd7e-6203-4082-9297-2d3fd9e17b74-kube-api-access-9fdsb\") pod \"oauth-openshift-558db77b4-zdhjn\" (UID: \"9728fd7e-6203-4082-9297-2d3fd9e17b74\") " pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.844177 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pfjfd\" (UniqueName: \"kubernetes.io/projected/f9d4b340-8ceb-41e1-8289-f48222e5d3d8-kube-api-access-pfjfd\") pod \"console-operator-58897d9998-znsp6\" (UID: \"f9d4b340-8ceb-41e1-8289-f48222e5d3d8\") " pod="openshift-console-operator/console-operator-58897d9998-znsp6"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.844209 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-zdhjn\" (UID: \"9728fd7e-6203-4082-9297-2d3fd9e17b74\") " pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.844255 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f9d4b340-8ceb-41e1-8289-f48222e5d3d8-config\") pod \"console-operator-58897d9998-znsp6\" (UID: \"f9d4b340-8ceb-41e1-8289-f48222e5d3d8\") " pod="openshift-console-operator/console-operator-58897d9998-znsp6"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.844280 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-zdhjn\" (UID: \"9728fd7e-6203-4082-9297-2d3fd9e17b74\") " pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.844307 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/df61f830-b312-4a01-8d17-057799312936-registry-certificates\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.844327 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3628b07e-d247-4487-8095-821bf56656b8-client-ca\") pod \"controller-manager-879f6c89f-86kqx\" (UID: \"3628b07e-d247-4487-8095-821bf56656b8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-86kqx"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.844346 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3628b07e-d247-4487-8095-821bf56656b8-serving-cert\") pod \"controller-manager-879f6c89f-86kqx\" (UID: \"3628b07e-d247-4487-8095-821bf56656b8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-86kqx"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.844370 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qwnwx\" (UniqueName: \"kubernetes.io/projected/3519d984-8bed-45ca-9e6d-687bec69ee24-kube-api-access-qwnwx\") pod \"cluster-samples-operator-665b6dd947-g5td2\" (UID: \"3519d984-8bed-45ca-9e6d-687bec69ee24\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-g5td2"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.844393 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-zdhjn\" (UID: \"9728fd7e-6203-4082-9297-2d3fd9e17b74\") " pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn"
Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.844415 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/6912f3bb-d8bc-4dc1-b3f0-9e7e1e8d354a-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-p7d5d\" (UID: \"6912f3bb-d8bc-4dc1-b3f0-9e7e1e8d354a\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-p7d5d"
\"kubernetes.io/configmap/6912f3bb-d8bc-4dc1-b3f0-9e7e1e8d354a-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-p7d5d\" (UID: \"6912f3bb-d8bc-4dc1-b3f0-9e7e1e8d354a\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-p7d5d" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.844435 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/6912f3bb-d8bc-4dc1-b3f0-9e7e1e8d354a-encryption-config\") pod \"apiserver-7bbb656c7d-p7d5d\" (UID: \"6912f3bb-d8bc-4dc1-b3f0-9e7e1e8d354a\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-p7d5d" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.845559 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.862805 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.882511 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.887755 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.887779 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.908470 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ltcjq" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.908853 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.922443 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.931985 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-255vg" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.941969 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.944711 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-w5dnh" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.945404 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.945654 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/3cea010a-6482-47b3-9c79-45432b921956-webhook-cert\") pod \"packageserver-d55dfcdfc-8kz2s\" (UID: \"3cea010a-6482-47b3-9c79-45432b921956\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-8kz2s" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.945710 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dds5m\" (UniqueName: \"kubernetes.io/projected/2d66d4a7-6db6-438c-9d2b-cb2bb0ef846b-kube-api-access-dds5m\") pod \"machine-api-operator-5694c8668f-s4nq8\" (UID: \"2d66d4a7-6db6-438c-9d2b-cb2bb0ef846b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-s4nq8" Jan 29 12:08:16 crc kubenswrapper[4753]: E0129 12:08:16.945758 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:17.445721663 +0000 UTC m=+111.697803128 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.945810 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fd8ff4f5-94ee-4772-b6d8-d0d28d433891-config\") pod \"kube-controller-manager-operator-78b949d7b-bmd6j\" (UID: \"fd8ff4f5-94ee-4772-b6d8-d0d28d433891\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-bmd6j" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.945867 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sk4v8\" (UniqueName: \"kubernetes.io/projected/e0c82664-4c0e-4f03-858f-dcf68539f92a-kube-api-access-sk4v8\") pod \"openshift-controller-manager-operator-756b6f6bc6-qkqfh\" (UID: \"e0c82664-4c0e-4f03-858f-dcf68539f92a\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-qkqfh" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.945896 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/dad90295-65db-470c-8041-19fcf86d0439-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-lswml\" (UID: \"dad90295-65db-470c-8041-19fcf86d0439\") " 
pod="openshift-marketplace/marketplace-operator-79b997595-lswml" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.945980 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tfk4v\" (UniqueName: \"kubernetes.io/projected/471bd5c8-e8c1-4078-8e32-c0c30a1b237b-kube-api-access-tfk4v\") pod \"etcd-operator-b45778765-lpjkw\" (UID: \"471bd5c8-e8c1-4078-8e32-c0c30a1b237b\") " pod="openshift-etcd-operator/etcd-operator-b45778765-lpjkw" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.946002 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q8whm\" (UniqueName: \"kubernetes.io/projected/e491ff79-3c18-4cf1-9e8b-3216fc5b2e7e-kube-api-access-q8whm\") pod \"machine-config-operator-74547568cd-j4xlb\" (UID: \"e491ff79-3c18-4cf1-9e8b-3216fc5b2e7e\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-j4xlb" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.946040 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-766r8\" (UniqueName: \"kubernetes.io/projected/df61f830-b312-4a01-8d17-057799312936-kube-api-access-766r8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.946066 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/fd8ff4f5-94ee-4772-b6d8-d0d28d433891-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-bmd6j\" (UID: \"fd8ff4f5-94ee-4772-b6d8-d0d28d433891\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-bmd6j" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.946095 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/e491ff79-3c18-4cf1-9e8b-3216fc5b2e7e-auth-proxy-config\") pod \"machine-config-operator-74547568cd-j4xlb\" (UID: \"e491ff79-3c18-4cf1-9e8b-3216fc5b2e7e\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-j4xlb" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.946115 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/7a2fedca-4a36-42ec-a6b6-63fcd2de6c07-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-6b2dk\" (UID: \"7a2fedca-4a36-42ec-a6b6-63fcd2de6c07\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6b2dk" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.946180 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bk48h\" (UniqueName: \"kubernetes.io/projected/c612cdf9-575c-4d80-9a48-93e32d5673ae-kube-api-access-bk48h\") pod \"csi-hostpathplugin-gntl4\" (UID: \"c612cdf9-575c-4d80-9a48-93e32d5673ae\") " pod="hostpath-provisioner/csi-hostpathplugin-gntl4" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.946268 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bsp9t\" (UniqueName: \"kubernetes.io/projected/b1377377-5ec9-4ff9-8550-aa1ae603a8cf-kube-api-access-bsp9t\") pod 
\"ingress-canary-2kbdk\" (UID: \"b1377377-5ec9-4ff9-8550-aa1ae603a8cf\") " pod="openshift-ingress-canary/ingress-canary-2kbdk" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.946305 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a22bc59-6a04-49b4-aa79-225e9571ee71-config\") pod \"route-controller-manager-6576b87f9c-dx8db\" (UID: \"6a22bc59-6a04-49b4-aa79-225e9571ee71\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dx8db" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.946327 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/471bd5c8-e8c1-4078-8e32-c0c30a1b237b-serving-cert\") pod \"etcd-operator-b45778765-lpjkw\" (UID: \"471bd5c8-e8c1-4078-8e32-c0c30a1b237b\") " pod="openshift-etcd-operator/etcd-operator-b45778765-lpjkw" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.946346 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/c612cdf9-575c-4d80-9a48-93e32d5673ae-plugins-dir\") pod \"csi-hostpathplugin-gntl4\" (UID: \"c612cdf9-575c-4d80-9a48-93e32d5673ae\") " pod="hostpath-provisioner/csi-hostpathplugin-gntl4" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.946368 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/40dd9f5b-066f-4400-a4a1-6a7a9eda8c90-profile-collector-cert\") pod \"catalog-operator-68c6474976-79sb7\" (UID: \"40dd9f5b-066f-4400-a4a1-6a7a9eda8c90\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-79sb7" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.946403 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-zdhjn\" (UID: \"9728fd7e-6203-4082-9297-2d3fd9e17b74\") " pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.946425 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/96da6032-b04a-4eef-8170-a57f2f9b00ba-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-nbdct\" (UID: \"96da6032-b04a-4eef-8170-a57f2f9b00ba\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-nbdct" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.946444 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/330eaab5-20d9-4b5c-a8f2-8bed8d2df7b7-serving-cert\") pod \"service-ca-operator-777779d784-kv2r9\" (UID: \"330eaab5-20d9-4b5c-a8f2-8bed8d2df7b7\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-kv2r9" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.946469 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b41a4b6f-931f-41d8-86b4-a9f1193eb48a-srv-cert\") pod \"olm-operator-6b444d44fb-v88cc\" (UID: \"b41a4b6f-931f-41d8-86b4-a9f1193eb48a\") " 
pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v88cc" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.946491 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/df61f830-b312-4a01-8d17-057799312936-registry-certificates\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.946533 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3628b07e-d247-4487-8095-821bf56656b8-client-ca\") pod \"controller-manager-879f6c89f-86kqx\" (UID: \"3628b07e-d247-4487-8095-821bf56656b8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-86kqx" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.946564 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7kdx9\" (UniqueName: \"kubernetes.io/projected/6fa51f84-9cf2-491a-bc13-0b2c4b48c5c0-kube-api-access-7kdx9\") pod \"service-ca-9c57cc56f-zm8vw\" (UID: \"6fa51f84-9cf2-491a-bc13-0b2c4b48c5c0\") " pod="openshift-service-ca/service-ca-9c57cc56f-zm8vw" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.946586 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/c612cdf9-575c-4d80-9a48-93e32d5673ae-registration-dir\") pod \"csi-hostpathplugin-gntl4\" (UID: \"c612cdf9-575c-4d80-9a48-93e32d5673ae\") " pod="hostpath-provisioner/csi-hostpathplugin-gntl4" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.946631 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/279ec67d-fb6e-49a2-8339-854132f4e3ab-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-fp6bq\" (UID: \"279ec67d-fb6e-49a2-8339-854132f4e3ab\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-fp6bq" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.946737 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/279ec67d-fb6e-49a2-8339-854132f4e3ab-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-fp6bq\" (UID: \"279ec67d-fb6e-49a2-8339-854132f4e3ab\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-fp6bq" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.946792 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/6912f3bb-d8bc-4dc1-b3f0-9e7e1e8d354a-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-p7d5d\" (UID: \"6912f3bb-d8bc-4dc1-b3f0-9e7e1e8d354a\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-p7d5d" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.946817 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/6912f3bb-d8bc-4dc1-b3f0-9e7e1e8d354a-encryption-config\") pod \"apiserver-7bbb656c7d-p7d5d\" (UID: \"6912f3bb-d8bc-4dc1-b3f0-9e7e1e8d354a\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-p7d5d" Jan 29 12:08:16 crc 
kubenswrapper[4753]: I0129 12:08:16.946890 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/6912f3bb-d8bc-4dc1-b3f0-9e7e1e8d354a-etcd-client\") pod \"apiserver-7bbb656c7d-p7d5d\" (UID: \"6912f3bb-d8bc-4dc1-b3f0-9e7e1e8d354a\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-p7d5d" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.946914 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b1377377-5ec9-4ff9-8550-aa1ae603a8cf-cert\") pod \"ingress-canary-2kbdk\" (UID: \"b1377377-5ec9-4ff9-8550-aa1ae603a8cf\") " pod="openshift-ingress-canary/ingress-canary-2kbdk" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.946939 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/91845c42-ac54-4831-b8ac-73737902b703-service-ca\") pod \"console-f9d7485db-mql86\" (UID: \"91845c42-ac54-4831-b8ac-73737902b703\") " pod="openshift-console/console-f9d7485db-mql86" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.947031 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f9d4b340-8ceb-41e1-8289-f48222e5d3d8-serving-cert\") pod \"console-operator-58897d9998-znsp6\" (UID: \"f9d4b340-8ceb-41e1-8289-f48222e5d3d8\") " pod="openshift-console-operator/console-operator-58897d9998-znsp6" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.947058 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wwrp4\" (UniqueName: \"kubernetes.io/projected/40dd9f5b-066f-4400-a4a1-6a7a9eda8c90-kube-api-access-wwrp4\") pod \"catalog-operator-68c6474976-79sb7\" (UID: \"40dd9f5b-066f-4400-a4a1-6a7a9eda8c90\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-79sb7" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.947081 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/6fa51f84-9cf2-491a-bc13-0b2c4b48c5c0-signing-key\") pod \"service-ca-9c57cc56f-zm8vw\" (UID: \"6fa51f84-9cf2-491a-bc13-0b2c4b48c5c0\") " pod="openshift-service-ca/service-ca-9c57cc56f-zm8vw" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.947103 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/9728fd7e-6203-4082-9297-2d3fd9e17b74-audit-policies\") pod \"oauth-openshift-558db77b4-zdhjn\" (UID: \"9728fd7e-6203-4082-9297-2d3fd9e17b74\") " pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.947170 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e0c82664-4c0e-4f03-858f-dcf68539f92a-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-qkqfh\" (UID: \"e0c82664-4c0e-4f03-858f-dcf68539f92a\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-qkqfh" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.947194 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/d8fa6dd3-8d5a-4518-a95a-d1782279340d-webhook-certs\") pod 
\"multus-admission-controller-857f4d67dd-gln66\" (UID: \"d8fa6dd3-8d5a-4518-a95a-d1782279340d\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-gln66" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.947234 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9rhnp\" (UniqueName: \"kubernetes.io/projected/200ffbfe-dff4-45e2-944d-34a3ad56f018-kube-api-access-9rhnp\") pod \"downloads-7954f5f757-b2rb9\" (UID: \"200ffbfe-dff4-45e2-944d-34a3ad56f018\") " pod="openshift-console/downloads-7954f5f757-b2rb9" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.947704 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/df61f830-b312-4a01-8d17-057799312936-registry-certificates\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.948124 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3628b07e-d247-4487-8095-821bf56656b8-client-ca\") pod \"controller-manager-879f6c89f-86kqx\" (UID: \"3628b07e-d247-4487-8095-821bf56656b8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-86kqx" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.948452 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/91845c42-ac54-4831-b8ac-73737902b703-service-ca\") pod \"console-f9d7485db-mql86\" (UID: \"91845c42-ac54-4831-b8ac-73737902b703\") " pod="openshift-console/console-f9d7485db-mql86" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.948775 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-zdhjn\" (UID: \"9728fd7e-6203-4082-9297-2d3fd9e17b74\") " pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.948820 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8e54e1ee-a1b8-454c-8b09-e252ca3fe9da-metrics-certs\") pod \"router-default-5444994796-jpxd9\" (UID: \"8e54e1ee-a1b8-454c-8b09-e252ca3fe9da\") " pod="openshift-ingress/router-default-5444994796-jpxd9" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.948842 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d2148dab-1475-49ad-b154-99cd20f15125-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-6xpng\" (UID: \"d2148dab-1475-49ad-b154-99cd20f15125\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-6xpng" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.948984 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/2d66d4a7-6db6-438c-9d2b-cb2bb0ef846b-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-s4nq8\" (UID: \"2d66d4a7-6db6-438c-9d2b-cb2bb0ef846b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-s4nq8" Jan 29 12:08:16 crc 
kubenswrapper[4753]: I0129 12:08:16.949536 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/9728fd7e-6203-4082-9297-2d3fd9e17b74-audit-policies\") pod \"oauth-openshift-558db77b4-zdhjn\" (UID: \"9728fd7e-6203-4082-9297-2d3fd9e17b74\") " pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.949670 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kpjl9\" (UniqueName: \"kubernetes.io/projected/91845c42-ac54-4831-b8ac-73737902b703-kube-api-access-kpjl9\") pod \"console-f9d7485db-mql86\" (UID: \"91845c42-ac54-4831-b8ac-73737902b703\") " pod="openshift-console/console-f9d7485db-mql86" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.949825 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fd8ff4f5-94ee-4772-b6d8-d0d28d433891-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-bmd6j\" (UID: \"fd8ff4f5-94ee-4772-b6d8-d0d28d433891\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-bmd6j" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.949953 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/dad90295-65db-470c-8041-19fcf86d0439-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-lswml\" (UID: \"dad90295-65db-470c-8041-19fcf86d0439\") " pod="openshift-marketplace/marketplace-operator-79b997595-lswml" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.950004 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/368534d6-5359-447b-afd1-2a1c16c0c5d8-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-779px\" (UID: \"368534d6-5359-447b-afd1-2a1c16c0c5d8\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-779px" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.950034 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xlt8c\" (UniqueName: \"kubernetes.io/projected/330eaab5-20d9-4b5c-a8f2-8bed8d2df7b7-kube-api-access-xlt8c\") pod \"service-ca-operator-777779d784-kv2r9\" (UID: \"330eaab5-20d9-4b5c-a8f2-8bed8d2df7b7\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-kv2r9" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.950062 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e0c82664-4c0e-4f03-858f-dcf68539f92a-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-qkqfh\" (UID: \"e0c82664-4c0e-4f03-858f-dcf68539f92a\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-qkqfh" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.950099 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/91845c42-ac54-4831-b8ac-73737902b703-console-config\") pod \"console-f9d7485db-mql86\" (UID: \"91845c42-ac54-4831-b8ac-73737902b703\") " pod="openshift-console/console-f9d7485db-mql86" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 
12:08:16.950121 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e0c82664-4c0e-4f03-858f-dcf68539f92a-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-qkqfh\" (UID: \"e0c82664-4c0e-4f03-858f-dcf68539f92a\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-qkqfh" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.950152 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/471bd5c8-e8c1-4078-8e32-c0c30a1b237b-etcd-ca\") pod \"etcd-operator-b45778765-lpjkw\" (UID: \"471bd5c8-e8c1-4078-8e32-c0c30a1b237b\") " pod="openshift-etcd-operator/etcd-operator-b45778765-lpjkw" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.950275 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b6e8630c-f357-4c2e-88d0-45218ea79981-bound-sa-token\") pod \"ingress-operator-5b745b69d9-4bpqt\" (UID: \"b6e8630c-f357-4c2e-88d0-45218ea79981\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4bpqt" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.950322 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-zdhjn\" (UID: \"9728fd7e-6203-4082-9297-2d3fd9e17b74\") " pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.950353 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-47qr2\" (UniqueName: \"kubernetes.io/projected/e1a92560-535a-4f2c-9ce6-9940bfc1bf37-kube-api-access-47qr2\") pod \"openshift-apiserver-operator-796bbdcf4f-rwlht\" (UID: \"e1a92560-535a-4f2c-9ce6-9940bfc1bf37\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-rwlht" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.950410 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a22bc59-6a04-49b4-aa79-225e9571ee71-config\") pod \"route-controller-manager-6576b87f9c-dx8db\" (UID: \"6a22bc59-6a04-49b4-aa79-225e9571ee71\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dx8db" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.950415 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/471bd5c8-e8c1-4078-8e32-c0c30a1b237b-config\") pod \"etcd-operator-b45778765-lpjkw\" (UID: \"471bd5c8-e8c1-4078-8e32-c0c30a1b237b\") " pod="openshift-etcd-operator/etcd-operator-b45778765-lpjkw" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.950469 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/96da6032-b04a-4eef-8170-a57f2f9b00ba-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-nbdct\" (UID: \"96da6032-b04a-4eef-8170-a57f2f9b00ba\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-nbdct" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.950490 4753 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/8e54e1ee-a1b8-454c-8b09-e252ca3fe9da-default-certificate\") pod \"router-default-5444994796-jpxd9\" (UID: \"8e54e1ee-a1b8-454c-8b09-e252ca3fe9da\") " pod="openshift-ingress/router-default-5444994796-jpxd9" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.950567 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/7a2fedca-4a36-42ec-a6b6-63fcd2de6c07-proxy-tls\") pod \"machine-config-controller-84d6567774-6b2dk\" (UID: \"7a2fedca-4a36-42ec-a6b6-63fcd2de6c07\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6b2dk" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.950600 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d2148dab-1475-49ad-b154-99cd20f15125-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-6xpng\" (UID: \"d2148dab-1475-49ad-b154-99cd20f15125\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-6xpng" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.950648 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/3519d984-8bed-45ca-9e6d-687bec69ee24-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-g5td2\" (UID: \"3519d984-8bed-45ca-9e6d-687bec69ee24\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-g5td2" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.950740 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hndgf\" (UniqueName: \"kubernetes.io/projected/368534d6-5359-447b-afd1-2a1c16c0c5d8-kube-api-access-hndgf\") pod \"package-server-manager-789f6589d5-779px\" (UID: \"368534d6-5359-447b-afd1-2a1c16c0c5d8\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-779px" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.950775 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wtgvz\" (UniqueName: \"kubernetes.io/projected/279ec67d-fb6e-49a2-8339-854132f4e3ab-kube-api-access-wtgvz\") pod \"kube-storage-version-migrator-operator-b67b599dd-fp6bq\" (UID: \"279ec67d-fb6e-49a2-8339-854132f4e3ab\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-fp6bq" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.950820 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/91845c42-ac54-4831-b8ac-73737902b703-oauth-serving-cert\") pod \"console-f9d7485db-mql86\" (UID: \"91845c42-ac54-4831-b8ac-73737902b703\") " pod="openshift-console/console-f9d7485db-mql86" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.950907 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6a22bc59-6a04-49b4-aa79-225e9571ee71-serving-cert\") pod \"route-controller-manager-6576b87f9c-dx8db\" (UID: \"6a22bc59-6a04-49b4-aa79-225e9571ee71\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dx8db" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.950943 
4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4f48974c-8787-482a-b962-f7646e12952e-secret-volume\") pod \"collect-profiles-29494800-86kw2\" (UID: \"4f48974c-8787-482a-b962-f7646e12952e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494800-86kw2" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.951056 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6e8630c-f357-4c2e-88d0-45218ea79981-trusted-ca\") pod \"ingress-operator-5b745b69d9-4bpqt\" (UID: \"b6e8630c-f357-4c2e-88d0-45218ea79981\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4bpqt" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.951089 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4f48974c-8787-482a-b962-f7646e12952e-config-volume\") pod \"collect-profiles-29494800-86kw2\" (UID: \"4f48974c-8787-482a-b962-f7646e12952e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494800-86kw2" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.951104 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-zdhjn\" (UID: \"9728fd7e-6203-4082-9297-2d3fd9e17b74\") " pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.951152 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/df61f830-b312-4a01-8d17-057799312936-bound-sa-token\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.951098 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/91845c42-ac54-4831-b8ac-73737902b703-console-config\") pod \"console-f9d7485db-mql86\" (UID: \"91845c42-ac54-4831-b8ac-73737902b703\") " pod="openshift-console/console-f9d7485db-mql86" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.951314 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/3cea010a-6482-47b3-9c79-45432b921956-apiservice-cert\") pod \"packageserver-d55dfcdfc-8kz2s\" (UID: \"3cea010a-6482-47b3-9c79-45432b921956\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-8kz2s" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.951395 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e63ba05b-c0ce-43bc-b660-f5d93a3c45a5-config-volume\") pod \"dns-default-jrblr\" (UID: \"e63ba05b-c0ce-43bc-b660-f5d93a3c45a5\") " pod="openshift-dns/dns-default-jrblr" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.951478 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: 
\"kubernetes.io/secret/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-zdhjn\" (UID: \"9728fd7e-6203-4082-9297-2d3fd9e17b74\") " pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.951535 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-zdhjn\" (UID: \"9728fd7e-6203-4082-9297-2d3fd9e17b74\") " pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.951571 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d2148dab-1475-49ad-b154-99cd20f15125-config\") pod \"kube-apiserver-operator-766d6c64bb-6xpng\" (UID: \"d2148dab-1475-49ad-b154-99cd20f15125\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-6xpng" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.951595 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/3628b07e-d247-4487-8095-821bf56656b8-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-86kqx\" (UID: \"3628b07e-d247-4487-8095-821bf56656b8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-86kqx" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.951634 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6a22bc59-6a04-49b4-aa79-225e9571ee71-client-ca\") pod \"route-controller-manager-6576b87f9c-dx8db\" (UID: \"6a22bc59-6a04-49b4-aa79-225e9571ee71\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dx8db" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.951738 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5mwvb\" (UniqueName: \"kubernetes.io/projected/6912f3bb-d8bc-4dc1-b3f0-9e7e1e8d354a-kube-api-access-5mwvb\") pod \"apiserver-7bbb656c7d-p7d5d\" (UID: \"6912f3bb-d8bc-4dc1-b3f0-9e7e1e8d354a\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-p7d5d" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.951769 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/40dd9f5b-066f-4400-a4a1-6a7a9eda8c90-srv-cert\") pod \"catalog-operator-68c6474976-79sb7\" (UID: \"40dd9f5b-066f-4400-a4a1-6a7a9eda8c90\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-79sb7" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.951835 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sxd8k\" (UniqueName: \"kubernetes.io/projected/b41a4b6f-931f-41d8-86b4-a9f1193eb48a-kube-api-access-sxd8k\") pod \"olm-operator-6b444d44fb-v88cc\" (UID: \"b41a4b6f-931f-41d8-86b4-a9f1193eb48a\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v88cc" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.951852 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: 
\"kubernetes.io/configmap/91845c42-ac54-4831-b8ac-73737902b703-oauth-serving-cert\") pod \"console-f9d7485db-mql86\" (UID: \"91845c42-ac54-4831-b8ac-73737902b703\") " pod="openshift-console/console-f9d7485db-mql86" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.951859 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zpwp6\" (UniqueName: \"kubernetes.io/projected/8e54e1ee-a1b8-454c-8b09-e252ca3fe9da-kube-api-access-zpwp6\") pod \"router-default-5444994796-jpxd9\" (UID: \"8e54e1ee-a1b8-454c-8b09-e252ca3fe9da\") " pod="openshift-ingress/router-default-5444994796-jpxd9" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.951922 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/96da6032-b04a-4eef-8170-a57f2f9b00ba-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-nbdct\" (UID: \"96da6032-b04a-4eef-8170-a57f2f9b00ba\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-nbdct" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.951947 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kpq58\" (UniqueName: \"kubernetes.io/projected/8fd9a883-0e5e-424f-bc31-199f2103c548-kube-api-access-kpq58\") pod \"control-plane-machine-set-operator-78cbb6b69f-8dvz9\" (UID: \"8fd9a883-0e5e-424f-bc31-199f2103c548\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-8dvz9" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.951976 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-zdhjn\" (UID: \"9728fd7e-6203-4082-9297-2d3fd9e17b74\") " pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.952008 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f5dcg\" (UniqueName: \"kubernetes.io/projected/d379e39e-ec0e-4419-8ad9-3deae28a5729-kube-api-access-f5dcg\") pod \"machine-config-server-26486\" (UID: \"d379e39e-ec0e-4419-8ad9-3deae28a5729\") " pod="openshift-machine-config-operator/machine-config-server-26486" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.952045 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/df61f830-b312-4a01-8d17-057799312936-ca-trust-extracted\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.952066 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-zdhjn\" (UID: \"9728fd7e-6203-4082-9297-2d3fd9e17b74\") " pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.952146 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9fdsb\" (UniqueName: 
\"kubernetes.io/projected/9728fd7e-6203-4082-9297-2d3fd9e17b74-kube-api-access-9fdsb\") pod \"oauth-openshift-558db77b4-zdhjn\" (UID: \"9728fd7e-6203-4082-9297-2d3fd9e17b74\") " pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.952166 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pfjfd\" (UniqueName: \"kubernetes.io/projected/f9d4b340-8ceb-41e1-8289-f48222e5d3d8-kube-api-access-pfjfd\") pod \"console-operator-58897d9998-znsp6\" (UID: \"f9d4b340-8ceb-41e1-8289-f48222e5d3d8\") " pod="openshift-console-operator/console-operator-58897d9998-znsp6" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.952204 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4ghtk\" (UniqueName: \"kubernetes.io/projected/3cea010a-6482-47b3-9c79-45432b921956-kube-api-access-4ghtk\") pod \"packageserver-d55dfcdfc-8kz2s\" (UID: \"3cea010a-6482-47b3-9c79-45432b921956\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-8kz2s" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.952243 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/471bd5c8-e8c1-4078-8e32-c0c30a1b237b-etcd-service-ca\") pod \"etcd-operator-b45778765-lpjkw\" (UID: \"471bd5c8-e8c1-4078-8e32-c0c30a1b237b\") " pod="openshift-etcd-operator/etcd-operator-b45778765-lpjkw" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.952265 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/471bd5c8-e8c1-4078-8e32-c0c30a1b237b-etcd-client\") pod \"etcd-operator-b45778765-lpjkw\" (UID: \"471bd5c8-e8c1-4078-8e32-c0c30a1b237b\") " pod="openshift-etcd-operator/etcd-operator-b45778765-lpjkw" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.952318 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8t92k\" (UniqueName: \"kubernetes.io/projected/dad90295-65db-470c-8041-19fcf86d0439-kube-api-access-8t92k\") pod \"marketplace-operator-79b997595-lswml\" (UID: \"dad90295-65db-470c-8041-19fcf86d0439\") " pod="openshift-marketplace/marketplace-operator-79b997595-lswml" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.952340 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f9d4b340-8ceb-41e1-8289-f48222e5d3d8-config\") pod \"console-operator-58897d9998-znsp6\" (UID: \"f9d4b340-8ceb-41e1-8289-f48222e5d3d8\") " pod="openshift-console-operator/console-operator-58897d9998-znsp6" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.952359 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dpx95\" (UniqueName: \"kubernetes.io/projected/7a2fedca-4a36-42ec-a6b6-63fcd2de6c07-kube-api-access-dpx95\") pod \"machine-config-controller-84d6567774-6b2dk\" (UID: \"7a2fedca-4a36-42ec-a6b6-63fcd2de6c07\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6b2dk" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.952383 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bjvsq\" (UniqueName: 
\"kubernetes.io/projected/8fe1a105-96fa-4847-97a5-3d83d04a86ec-kube-api-access-bjvsq\") pod \"migrator-59844c95c7-gkf8g\" (UID: \"8fe1a105-96fa-4847-97a5-3d83d04a86ec\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-gkf8g" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.952404 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8e54e1ee-a1b8-454c-8b09-e252ca3fe9da-service-ca-bundle\") pod \"router-default-5444994796-jpxd9\" (UID: \"8e54e1ee-a1b8-454c-8b09-e252ca3fe9da\") " pod="openshift-ingress/router-default-5444994796-jpxd9" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.952424 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/330eaab5-20d9-4b5c-a8f2-8bed8d2df7b7-config\") pod \"service-ca-operator-777779d784-kv2r9\" (UID: \"330eaab5-20d9-4b5c-a8f2-8bed8d2df7b7\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-kv2r9" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.952533 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-zdhjn\" (UID: \"9728fd7e-6203-4082-9297-2d3fd9e17b74\") " pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.952559 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3628b07e-d247-4487-8095-821bf56656b8-serving-cert\") pod \"controller-manager-879f6c89f-86kqx\" (UID: \"3628b07e-d247-4487-8095-821bf56656b8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-86kqx" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.952572 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6a22bc59-6a04-49b4-aa79-225e9571ee71-client-ca\") pod \"route-controller-manager-6576b87f9c-dx8db\" (UID: \"6a22bc59-6a04-49b4-aa79-225e9571ee71\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dx8db" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.952596 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qwnwx\" (UniqueName: \"kubernetes.io/projected/3519d984-8bed-45ca-9e6d-687bec69ee24-kube-api-access-qwnwx\") pod \"cluster-samples-operator-665b6dd947-g5td2\" (UID: \"3519d984-8bed-45ca-9e6d-687bec69ee24\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-g5td2" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.952614 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-zdhjn\" (UID: \"9728fd7e-6203-4082-9297-2d3fd9e17b74\") " pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.952634 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/8e54e1ee-a1b8-454c-8b09-e252ca3fe9da-stats-auth\") pod 
\"router-default-5444994796-jpxd9\" (UID: \"8e54e1ee-a1b8-454c-8b09-e252ca3fe9da\") " pod="openshift-ingress/router-default-5444994796-jpxd9" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.952657 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/c612cdf9-575c-4d80-9a48-93e32d5673ae-socket-dir\") pod \"csi-hostpathplugin-gntl4\" (UID: \"c612cdf9-575c-4d80-9a48-93e32d5673ae\") " pod="hostpath-provisioner/csi-hostpathplugin-gntl4" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.952676 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rx9j4\" (UniqueName: \"kubernetes.io/projected/d8fa6dd3-8d5a-4518-a95a-d1782279340d-kube-api-access-rx9j4\") pod \"multus-admission-controller-857f4d67dd-gln66\" (UID: \"d8fa6dd3-8d5a-4518-a95a-d1782279340d\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-gln66" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.952698 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4cwgg\" (UniqueName: \"kubernetes.io/projected/4f48974c-8787-482a-b962-f7646e12952e-kube-api-access-4cwgg\") pod \"collect-profiles-29494800-86kw2\" (UID: \"4f48974c-8787-482a-b962-f7646e12952e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494800-86kw2" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.952725 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/df61f830-b312-4a01-8d17-057799312936-trusted-ca\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.952746 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b41a4b6f-931f-41d8-86b4-a9f1193eb48a-profile-collector-cert\") pod \"olm-operator-6b444d44fb-v88cc\" (UID: \"b41a4b6f-931f-41d8-86b4-a9f1193eb48a\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v88cc" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.952766 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/6fa51f84-9cf2-491a-bc13-0b2c4b48c5c0-signing-cabundle\") pod \"service-ca-9c57cc56f-zm8vw\" (UID: \"6fa51f84-9cf2-491a-bc13-0b2c4b48c5c0\") " pod="openshift-service-ca/service-ca-9c57cc56f-zm8vw" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.952789 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/91845c42-ac54-4831-b8ac-73737902b703-console-oauth-config\") pod \"console-f9d7485db-mql86\" (UID: \"91845c42-ac54-4831-b8ac-73737902b703\") " pod="openshift-console/console-f9d7485db-mql86" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.952808 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/d379e39e-ec0e-4419-8ad9-3deae28a5729-certs\") pod \"machine-config-server-26486\" (UID: \"d379e39e-ec0e-4419-8ad9-3deae28a5729\") " pod="openshift-machine-config-operator/machine-config-server-26486" Jan 29 12:08:16 
crc kubenswrapper[4753]: I0129 12:08:16.952832 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/b6e8630c-f357-4c2e-88d0-45218ea79981-metrics-tls\") pod \"ingress-operator-5b745b69d9-4bpqt\" (UID: \"b6e8630c-f357-4c2e-88d0-45218ea79981\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4bpqt" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.952955 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/3628b07e-d247-4487-8095-821bf56656b8-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-86kqx\" (UID: \"3628b07e-d247-4487-8095-821bf56656b8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-86kqx" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.952972 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/f9d4b340-8ceb-41e1-8289-f48222e5d3d8-trusted-ca\") pod \"console-operator-58897d9998-znsp6\" (UID: \"f9d4b340-8ceb-41e1-8289-f48222e5d3d8\") " pod="openshift-console-operator/console-operator-58897d9998-znsp6" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.952998 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-spxzk\" (UniqueName: \"kubernetes.io/projected/e63ba05b-c0ce-43bc-b660-f5d93a3c45a5-kube-api-access-spxzk\") pod \"dns-default-jrblr\" (UID: \"e63ba05b-c0ce-43bc-b660-f5d93a3c45a5\") " pod="openshift-dns/dns-default-jrblr" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.953047 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3628b07e-d247-4487-8095-821bf56656b8-config\") pod \"controller-manager-879f6c89f-86kqx\" (UID: \"3628b07e-d247-4487-8095-821bf56656b8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-86kqx" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.953074 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/df61f830-b312-4a01-8d17-057799312936-registry-tls\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.953096 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/3cea010a-6482-47b3-9c79-45432b921956-tmpfs\") pod \"packageserver-d55dfcdfc-8kz2s\" (UID: \"3cea010a-6482-47b3-9c79-45432b921956\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-8kz2s" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.953138 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/d379e39e-ec0e-4419-8ad9-3deae28a5729-node-bootstrap-token\") pod \"machine-config-server-26486\" (UID: \"d379e39e-ec0e-4419-8ad9-3deae28a5729\") " pod="openshift-machine-config-operator/machine-config-server-26486" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.953160 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: 
\"kubernetes.io/secret/8fd9a883-0e5e-424f-bc31-199f2103c548-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-8dvz9\" (UID: \"8fd9a883-0e5e-424f-bc31-199f2103c548\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-8dvz9" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.953181 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c2q59\" (UniqueName: \"kubernetes.io/projected/b6e8630c-f357-4c2e-88d0-45218ea79981-kube-api-access-c2q59\") pod \"ingress-operator-5b745b69d9-4bpqt\" (UID: \"b6e8630c-f357-4c2e-88d0-45218ea79981\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4bpqt" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.953202 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/df61f830-b312-4a01-8d17-057799312936-installation-pull-secrets\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.953240 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e1a92560-535a-4f2c-9ce6-9940bfc1bf37-config\") pod \"openshift-apiserver-operator-796bbdcf4f-rwlht\" (UID: \"e1a92560-535a-4f2c-9ce6-9940bfc1bf37\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-rwlht" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.953272 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/6912f3bb-d8bc-4dc1-b3f0-9e7e1e8d354a-audit-policies\") pod \"apiserver-7bbb656c7d-p7d5d\" (UID: \"6912f3bb-d8bc-4dc1-b3f0-9e7e1e8d354a\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-p7d5d" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.953297 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-zdhjn\" (UID: \"9728fd7e-6203-4082-9297-2d3fd9e17b74\") " pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.953323 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e1a92560-535a-4f2c-9ce6-9940bfc1bf37-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-rwlht\" (UID: \"e1a92560-535a-4f2c-9ce6-9940bfc1bf37\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-rwlht" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.953355 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/e491ff79-3c18-4cf1-9e8b-3216fc5b2e7e-images\") pod \"machine-config-operator-74547568cd-j4xlb\" (UID: \"e491ff79-3c18-4cf1-9e8b-3216fc5b2e7e\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-j4xlb" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.953378 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: 
\"kubernetes.io/secret/e491ff79-3c18-4cf1-9e8b-3216fc5b2e7e-proxy-tls\") pod \"machine-config-operator-74547568cd-j4xlb\" (UID: \"e491ff79-3c18-4cf1-9e8b-3216fc5b2e7e\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-j4xlb" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.953424 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.953451 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/c612cdf9-575c-4d80-9a48-93e32d5673ae-mountpoint-dir\") pod \"csi-hostpathplugin-gntl4\" (UID: \"c612cdf9-575c-4d80-9a48-93e32d5673ae\") " pod="hostpath-provisioner/csi-hostpathplugin-gntl4" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.953625 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/c612cdf9-575c-4d80-9a48-93e32d5673ae-csi-data-dir\") pod \"csi-hostpathplugin-gntl4\" (UID: \"c612cdf9-575c-4d80-9a48-93e32d5673ae\") " pod="hostpath-provisioner/csi-hostpathplugin-gntl4" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.953672 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/9728fd7e-6203-4082-9297-2d3fd9e17b74-audit-dir\") pod \"oauth-openshift-558db77b4-zdhjn\" (UID: \"9728fd7e-6203-4082-9297-2d3fd9e17b74\") " pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.953738 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/2d66d4a7-6db6-438c-9d2b-cb2bb0ef846b-images\") pod \"machine-api-operator-5694c8668f-s4nq8\" (UID: \"2d66d4a7-6db6-438c-9d2b-cb2bb0ef846b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-s4nq8" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.953831 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/91845c42-ac54-4831-b8ac-73737902b703-console-serving-cert\") pod \"console-f9d7485db-mql86\" (UID: \"91845c42-ac54-4831-b8ac-73737902b703\") " pod="openshift-console/console-f9d7485db-mql86" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.953887 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q4hkh\" (UniqueName: \"kubernetes.io/projected/6a22bc59-6a04-49b4-aa79-225e9571ee71-kube-api-access-q4hkh\") pod \"route-controller-manager-6576b87f9c-dx8db\" (UID: \"6a22bc59-6a04-49b4-aa79-225e9571ee71\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dx8db" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.953925 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/6912f3bb-d8bc-4dc1-b3f0-9e7e1e8d354a-audit-dir\") pod \"apiserver-7bbb656c7d-p7d5d\" (UID: \"6912f3bb-d8bc-4dc1-b3f0-9e7e1e8d354a\") " 
pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-p7d5d" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.953974 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2d66d4a7-6db6-438c-9d2b-cb2bb0ef846b-config\") pod \"machine-api-operator-5694c8668f-s4nq8\" (UID: \"2d66d4a7-6db6-438c-9d2b-cb2bb0ef846b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-s4nq8" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.954028 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6912f3bb-d8bc-4dc1-b3f0-9e7e1e8d354a-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-p7d5d\" (UID: \"6912f3bb-d8bc-4dc1-b3f0-9e7e1e8d354a\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-p7d5d" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.954121 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/e63ba05b-c0ce-43bc-b660-f5d93a3c45a5-metrics-tls\") pod \"dns-default-jrblr\" (UID: \"e63ba05b-c0ce-43bc-b660-f5d93a3c45a5\") " pod="openshift-dns/dns-default-jrblr" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.954160 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/91845c42-ac54-4831-b8ac-73737902b703-trusted-ca-bundle\") pod \"console-f9d7485db-mql86\" (UID: \"91845c42-ac54-4831-b8ac-73737902b703\") " pod="openshift-console/console-f9d7485db-mql86" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.954186 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q4nmh\" (UniqueName: \"kubernetes.io/projected/3628b07e-d247-4487-8095-821bf56656b8-kube-api-access-q4nmh\") pod \"controller-manager-879f6c89f-86kqx\" (UID: \"3628b07e-d247-4487-8095-821bf56656b8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-86kqx" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.954196 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-zdhjn\" (UID: \"9728fd7e-6203-4082-9297-2d3fd9e17b74\") " pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.954212 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-zdhjn\" (UID: \"9728fd7e-6203-4082-9297-2d3fd9e17b74\") " pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.954257 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6912f3bb-d8bc-4dc1-b3f0-9e7e1e8d354a-serving-cert\") pod \"apiserver-7bbb656c7d-p7d5d\" (UID: \"6912f3bb-d8bc-4dc1-b3f0-9e7e1e8d354a\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-p7d5d" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.954800 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/e1a92560-535a-4f2c-9ce6-9940bfc1bf37-config\") pod \"openshift-apiserver-operator-796bbdcf4f-rwlht\" (UID: \"e1a92560-535a-4f2c-9ce6-9940bfc1bf37\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-rwlht" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.955108 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-zdhjn\" (UID: \"9728fd7e-6203-4082-9297-2d3fd9e17b74\") " pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.955542 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/6912f3bb-d8bc-4dc1-b3f0-9e7e1e8d354a-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-p7d5d\" (UID: \"6912f3bb-d8bc-4dc1-b3f0-9e7e1e8d354a\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-p7d5d" Jan 29 12:08:16 crc kubenswrapper[4753]: I0129 12:08:16.956354 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/df61f830-b312-4a01-8d17-057799312936-trusted-ca\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:16.957257 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-zdhjn\" (UID: \"9728fd7e-6203-4082-9297-2d3fd9e17b74\") " pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:16.957621 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/f9d4b340-8ceb-41e1-8289-f48222e5d3d8-trusted-ca\") pod \"console-operator-58897d9998-znsp6\" (UID: \"f9d4b340-8ceb-41e1-8289-f48222e5d3d8\") " pod="openshift-console-operator/console-operator-58897d9998-znsp6" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:16.957735 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/df61f830-b312-4a01-8d17-057799312936-installation-pull-secrets\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:16.960191 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-zdhjn\" (UID: \"9728fd7e-6203-4082-9297-2d3fd9e17b74\") " pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:16.960487 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3628b07e-d247-4487-8095-821bf56656b8-serving-cert\") pod \"controller-manager-879f6c89f-86kqx\" (UID: \"3628b07e-d247-4487-8095-821bf56656b8\") " 
pod="openshift-controller-manager/controller-manager-879f6c89f-86kqx" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.137132 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/6912f3bb-d8bc-4dc1-b3f0-9e7e1e8d354a-audit-dir\") pod \"apiserver-7bbb656c7d-p7d5d\" (UID: \"6912f3bb-d8bc-4dc1-b3f0-9e7e1e8d354a\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-p7d5d" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.138161 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2d66d4a7-6db6-438c-9d2b-cb2bb0ef846b-config\") pod \"machine-api-operator-5694c8668f-s4nq8\" (UID: \"2d66d4a7-6db6-438c-9d2b-cb2bb0ef846b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-s4nq8" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.138708 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6912f3bb-d8bc-4dc1-b3f0-9e7e1e8d354a-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-p7d5d\" (UID: \"6912f3bb-d8bc-4dc1-b3f0-9e7e1e8d354a\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-p7d5d" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.143280 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e0c82664-4c0e-4f03-858f-dcf68539f92a-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-qkqfh\" (UID: \"e0c82664-4c0e-4f03-858f-dcf68539f92a\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-qkqfh" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.143279 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-zdhjn\" (UID: \"9728fd7e-6203-4082-9297-2d3fd9e17b74\") " pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.143587 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/6912f3bb-d8bc-4dc1-b3f0-9e7e1e8d354a-encryption-config\") pod \"apiserver-7bbb656c7d-p7d5d\" (UID: \"6912f3bb-d8bc-4dc1-b3f0-9e7e1e8d354a\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-p7d5d" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.143939 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f9d4b340-8ceb-41e1-8289-f48222e5d3d8-serving-cert\") pod \"console-operator-58897d9998-znsp6\" (UID: \"f9d4b340-8ceb-41e1-8289-f48222e5d3d8\") " pod="openshift-console-operator/console-operator-58897d9998-znsp6" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.143989 4753 request.go:700] Waited for 1.173238901s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/secrets?fieldSelector=metadata.name%3Dmco-proxy-tls&limit=500&resourceVersion=0 Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.144213 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-f6z5s" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.145236 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-zdhjn\" (UID: \"9728fd7e-6203-4082-9297-2d3fd9e17b74\") " pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.145958 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/6912f3bb-d8bc-4dc1-b3f0-9e7e1e8d354a-audit-policies\") pod \"apiserver-7bbb656c7d-p7d5d\" (UID: \"6912f3bb-d8bc-4dc1-b3f0-9e7e1e8d354a\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-p7d5d" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.145994 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/9728fd7e-6203-4082-9297-2d3fd9e17b74-audit-dir\") pod \"oauth-openshift-558db77b4-zdhjn\" (UID: \"9728fd7e-6203-4082-9297-2d3fd9e17b74\") " pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.146826 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/2d66d4a7-6db6-438c-9d2b-cb2bb0ef846b-images\") pod \"machine-api-operator-5694c8668f-s4nq8\" (UID: \"2d66d4a7-6db6-438c-9d2b-cb2bb0ef846b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-s4nq8" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.150925 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.151263 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.151395 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.151571 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.151791 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.151831 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-zdhjn\" (UID: \"9728fd7e-6203-4082-9297-2d3fd9e17b74\") " pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.153111 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/6912f3bb-d8bc-4dc1-b3f0-9e7e1e8d354a-etcd-client\") pod \"apiserver-7bbb656c7d-p7d5d\" (UID: \"6912f3bb-d8bc-4dc1-b3f0-9e7e1e8d354a\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-p7d5d" Jan 29 12:08:17 crc 
Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.153196 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/df61f830-b312-4a01-8d17-057799312936-ca-trust-extracted\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.154081 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/96da6032-b04a-4eef-8170-a57f2f9b00ba-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-nbdct\" (UID: \"96da6032-b04a-4eef-8170-a57f2f9b00ba\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-nbdct"
Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.154117 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/330eaab5-20d9-4b5c-a8f2-8bed8d2df7b7-serving-cert\") pod \"service-ca-operator-777779d784-kv2r9\" (UID: \"330eaab5-20d9-4b5c-a8f2-8bed8d2df7b7\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-kv2r9"
Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.154140 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b41a4b6f-931f-41d8-86b4-a9f1193eb48a-srv-cert\") pod \"olm-operator-6b444d44fb-v88cc\" (UID: \"b41a4b6f-931f-41d8-86b4-a9f1193eb48a\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v88cc"
Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.155472 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config"
Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.155803 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl"
Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.156018 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt"
Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.158041 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e1a92560-535a-4f2c-9ce6-9940bfc1bf37-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-rwlht\" (UID: \"e1a92560-535a-4f2c-9ce6-9940bfc1bf37\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-rwlht"
Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.158795 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f9d4b340-8ceb-41e1-8289-f48222e5d3d8-config\") pod \"console-operator-58897d9998-znsp6\" (UID: \"f9d4b340-8ceb-41e1-8289-f48222e5d3d8\") " pod="openshift-console-operator/console-operator-58897d9998-znsp6"
Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.159811 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3628b07e-d247-4487-8095-821bf56656b8-config\") pod \"controller-manager-879f6c89f-86kqx\" (UID: \"3628b07e-d247-4487-8095-821bf56656b8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-86kqx"
Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.159835 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7kdx9\" (UniqueName: \"kubernetes.io/projected/6fa51f84-9cf2-491a-bc13-0b2c4b48c5c0-kube-api-access-7kdx9\") pod \"service-ca-9c57cc56f-zm8vw\" (UID: \"6fa51f84-9cf2-491a-bc13-0b2c4b48c5c0\") " pod="openshift-service-ca/service-ca-9c57cc56f-zm8vw"
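The reflector.go:368] "Caches populated" lines record kubelet's per-object list-watch caches filling: for each Secret or ConfigMap a pod references, kubelet lists and then watches that single object, so the volume SetUp operations around it read from a local store instead of hitting the API server on every sync. A sketch of the same list-watch/cache pattern using a shared informer (client-go's general-purpose machinery, not kubelet's exact object manager); the namespace and secret name come from the log above, the kubeconfig path is assumed:

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/root/.kube/config") // assumed path
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// Watch a single namespace, loosely mirroring kubelet's scoped caches.
	factory := informers.NewSharedInformerFactoryWithOptions(
		client, 10*time.Minute,
		informers.WithNamespace("openshift-machine-config-operator"))
	secrets := factory.Core().V1().Secrets()
	informer := secrets.Informer()

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)
	// The reflector's initial LIST completing is what "Caches populated" marks.
	if !cache.WaitForCacheSync(stop, informer.HasSynced) {
		panic("cache never synced")
	}
	// Subsequent reads are served from the local store, not the API server.
	s, err := secrets.Lister().Secrets("openshift-machine-config-operator").Get("mco-proxy-tls")
	if err != nil {
		panic(err)
	}
	fmt.Println("served from local cache:", s.Name)
}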
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7kdx9\" (UniqueName: \"kubernetes.io/projected/6fa51f84-9cf2-491a-bc13-0b2c4b48c5c0-kube-api-access-7kdx9\") pod \"service-ca-9c57cc56f-zm8vw\" (UID: \"6fa51f84-9cf2-491a-bc13-0b2c4b48c5c0\") " pod="openshift-service-ca/service-ca-9c57cc56f-zm8vw" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.160408 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/c612cdf9-575c-4d80-9a48-93e32d5673ae-registration-dir\") pod \"csi-hostpathplugin-gntl4\" (UID: \"c612cdf9-575c-4d80-9a48-93e32d5673ae\") " pod="hostpath-provisioner/csi-hostpathplugin-gntl4" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.160535 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/279ec67d-fb6e-49a2-8339-854132f4e3ab-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-fp6bq\" (UID: \"279ec67d-fb6e-49a2-8339-854132f4e3ab\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-fp6bq" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.160584 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/279ec67d-fb6e-49a2-8339-854132f4e3ab-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-fp6bq\" (UID: \"279ec67d-fb6e-49a2-8339-854132f4e3ab\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-fp6bq" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.163216 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/279ec67d-fb6e-49a2-8339-854132f4e3ab-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-fp6bq\" (UID: \"279ec67d-fb6e-49a2-8339-854132f4e3ab\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-fp6bq" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.163459 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b1377377-5ec9-4ff9-8550-aa1ae603a8cf-cert\") pod \"ingress-canary-2kbdk\" (UID: \"b1377377-5ec9-4ff9-8550-aa1ae603a8cf\") " pod="openshift-ingress-canary/ingress-canary-2kbdk" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.164184 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/c612cdf9-575c-4d80-9a48-93e32d5673ae-registration-dir\") pod \"csi-hostpathplugin-gntl4\" (UID: \"c612cdf9-575c-4d80-9a48-93e32d5673ae\") " pod="hostpath-provisioner/csi-hostpathplugin-gntl4" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.164377 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/96da6032-b04a-4eef-8170-a57f2f9b00ba-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-nbdct\" (UID: \"96da6032-b04a-4eef-8170-a57f2f9b00ba\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-nbdct" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.164577 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wwrp4\" (UniqueName: 
\"kubernetes.io/projected/40dd9f5b-066f-4400-a4a1-6a7a9eda8c90-kube-api-access-wwrp4\") pod \"catalog-operator-68c6474976-79sb7\" (UID: \"40dd9f5b-066f-4400-a4a1-6a7a9eda8c90\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-79sb7" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.164614 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/6fa51f84-9cf2-491a-bc13-0b2c4b48c5c0-signing-key\") pod \"service-ca-9c57cc56f-zm8vw\" (UID: \"6fa51f84-9cf2-491a-bc13-0b2c4b48c5c0\") " pod="openshift-service-ca/service-ca-9c57cc56f-zm8vw" Jan 29 12:08:17 crc kubenswrapper[4753]: E0129 12:08:17.164865 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:17.6648458 +0000 UTC m=+111.916927315 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.164907 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/d8fa6dd3-8d5a-4518-a95a-d1782279340d-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-gln66\" (UID: \"d8fa6dd3-8d5a-4518-a95a-d1782279340d\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-gln66" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.164948 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9rhnp\" (UniqueName: \"kubernetes.io/projected/200ffbfe-dff4-45e2-944d-34a3ad56f018-kube-api-access-9rhnp\") pod \"downloads-7954f5f757-b2rb9\" (UID: \"200ffbfe-dff4-45e2-944d-34a3ad56f018\") " pod="openshift-console/downloads-7954f5f757-b2rb9" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.165058 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d2148dab-1475-49ad-b154-99cd20f15125-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-6xpng\" (UID: \"d2148dab-1475-49ad-b154-99cd20f15125\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-6xpng" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.165278 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8e54e1ee-a1b8-454c-8b09-e252ca3fe9da-metrics-certs\") pod \"router-default-5444994796-jpxd9\" (UID: \"8e54e1ee-a1b8-454c-8b09-e252ca3fe9da\") " pod="openshift-ingress/router-default-5444994796-jpxd9" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.172886 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-zdhjn\" (UID: \"9728fd7e-6203-4082-9297-2d3fd9e17b74\") " 
pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.173265 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-zdhjn\" (UID: \"9728fd7e-6203-4082-9297-2d3fd9e17b74\") " pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.173821 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/3519d984-8bed-45ca-9e6d-687bec69ee24-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-g5td2\" (UID: \"3519d984-8bed-45ca-9e6d-687bec69ee24\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-g5td2" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.174341 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/2d66d4a7-6db6-438c-9d2b-cb2bb0ef846b-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-s4nq8\" (UID: \"2d66d4a7-6db6-438c-9d2b-cb2bb0ef846b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-s4nq8" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.174861 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6912f3bb-d8bc-4dc1-b3f0-9e7e1e8d354a-serving-cert\") pod \"apiserver-7bbb656c7d-p7d5d\" (UID: \"6912f3bb-d8bc-4dc1-b3f0-9e7e1e8d354a\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-p7d5d" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.175325 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6a22bc59-6a04-49b4-aa79-225e9571ee71-serving-cert\") pod \"route-controller-manager-6576b87f9c-dx8db\" (UID: \"6a22bc59-6a04-49b4-aa79-225e9571ee71\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dx8db" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.175684 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fd8ff4f5-94ee-4772-b6d8-d0d28d433891-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-bmd6j\" (UID: \"fd8ff4f5-94ee-4772-b6d8-d0d28d433891\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-bmd6j" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.175817 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xlt8c\" (UniqueName: \"kubernetes.io/projected/330eaab5-20d9-4b5c-a8f2-8bed8d2df7b7-kube-api-access-xlt8c\") pod \"service-ca-operator-777779d784-kv2r9\" (UID: \"330eaab5-20d9-4b5c-a8f2-8bed8d2df7b7\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-kv2r9" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.176171 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.177717 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/91845c42-ac54-4831-b8ac-73737902b703-console-oauth-config\") pod \"console-f9d7485db-mql86\" (UID: 
\"91845c42-ac54-4831-b8ac-73737902b703\") " pod="openshift-console/console-f9d7485db-mql86" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.178401 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/dad90295-65db-470c-8041-19fcf86d0439-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-lswml\" (UID: \"dad90295-65db-470c-8041-19fcf86d0439\") " pod="openshift-marketplace/marketplace-operator-79b997595-lswml" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.179337 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/368534d6-5359-447b-afd1-2a1c16c0c5d8-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-779px\" (UID: \"368534d6-5359-447b-afd1-2a1c16c0c5d8\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-779px" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.179752 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fd8ff4f5-94ee-4772-b6d8-d0d28d433891-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-bmd6j\" (UID: \"fd8ff4f5-94ee-4772-b6d8-d0d28d433891\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-bmd6j" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.180259 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.180974 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/471bd5c8-e8c1-4078-8e32-c0c30a1b237b-config\") pod \"etcd-operator-b45778765-lpjkw\" (UID: \"471bd5c8-e8c1-4078-8e32-c0c30a1b237b\") " pod="openshift-etcd-operator/etcd-operator-b45778765-lpjkw" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.181287 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-zdhjn\" (UID: \"9728fd7e-6203-4082-9297-2d3fd9e17b74\") " pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.182211 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/471bd5c8-e8c1-4078-8e32-c0c30a1b237b-config\") pod \"etcd-operator-b45778765-lpjkw\" (UID: \"471bd5c8-e8c1-4078-8e32-c0c30a1b237b\") " pod="openshift-etcd-operator/etcd-operator-b45778765-lpjkw" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.185948 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c2q59\" (UniqueName: \"kubernetes.io/projected/b6e8630c-f357-4c2e-88d0-45218ea79981-kube-api-access-c2q59\") pod \"ingress-operator-5b745b69d9-4bpqt\" (UID: \"b6e8630c-f357-4c2e-88d0-45218ea79981\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4bpqt" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.186159 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: 
\"kubernetes.io/secret/d379e39e-ec0e-4419-8ad9-3deae28a5729-node-bootstrap-token\") pod \"machine-config-server-26486\" (UID: \"d379e39e-ec0e-4419-8ad9-3deae28a5729\") " pod="openshift-machine-config-operator/machine-config-server-26486" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.186642 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.186865 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/8fd9a883-0e5e-424f-bc31-199f2103c548-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-8dvz9\" (UID: \"8fd9a883-0e5e-424f-bc31-199f2103c548\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-8dvz9" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.187071 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.187346 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/e491ff79-3c18-4cf1-9e8b-3216fc5b2e7e-images\") pod \"machine-config-operator-74547568cd-j4xlb\" (UID: \"e491ff79-3c18-4cf1-9e8b-3216fc5b2e7e\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-j4xlb" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.187502 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/e491ff79-3c18-4cf1-9e8b-3216fc5b2e7e-proxy-tls\") pod \"machine-config-operator-74547568cd-j4xlb\" (UID: \"e491ff79-3c18-4cf1-9e8b-3216fc5b2e7e\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-j4xlb" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.188536 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/c612cdf9-575c-4d80-9a48-93e32d5673ae-mountpoint-dir\") pod \"csi-hostpathplugin-gntl4\" (UID: \"c612cdf9-575c-4d80-9a48-93e32d5673ae\") " pod="hostpath-provisioner/csi-hostpathplugin-gntl4" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.188589 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/c612cdf9-575c-4d80-9a48-93e32d5673ae-csi-data-dir\") pod \"csi-hostpathplugin-gntl4\" (UID: \"c612cdf9-575c-4d80-9a48-93e32d5673ae\") " pod="hostpath-provisioner/csi-hostpathplugin-gntl4" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.188623 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/c612cdf9-575c-4d80-9a48-93e32d5673ae-mountpoint-dir\") pod \"csi-hostpathplugin-gntl4\" (UID: \"c612cdf9-575c-4d80-9a48-93e32d5673ae\") " pod="hostpath-provisioner/csi-hostpathplugin-gntl4" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.188746 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/c612cdf9-575c-4d80-9a48-93e32d5673ae-csi-data-dir\") pod \"csi-hostpathplugin-gntl4\" (UID: \"c612cdf9-575c-4d80-9a48-93e32d5673ae\") " pod="hostpath-provisioner/csi-hostpathplugin-gntl4" Jan 29 12:08:17 crc 
kubenswrapper[4753]: I0129 12:08:17.189285 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/e63ba05b-c0ce-43bc-b660-f5d93a3c45a5-metrics-tls\") pod \"dns-default-jrblr\" (UID: \"e63ba05b-c0ce-43bc-b660-f5d93a3c45a5\") " pod="openshift-dns/dns-default-jrblr" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.189336 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/3cea010a-6482-47b3-9c79-45432b921956-webhook-cert\") pod \"packageserver-d55dfcdfc-8kz2s\" (UID: \"3cea010a-6482-47b3-9c79-45432b921956\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-8kz2s" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.189363 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fd8ff4f5-94ee-4772-b6d8-d0d28d433891-config\") pod \"kube-controller-manager-operator-78b949d7b-bmd6j\" (UID: \"fd8ff4f5-94ee-4772-b6d8-d0d28d433891\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-bmd6j" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.189413 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/dad90295-65db-470c-8041-19fcf86d0439-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-lswml\" (UID: \"dad90295-65db-470c-8041-19fcf86d0439\") " pod="openshift-marketplace/marketplace-operator-79b997595-lswml" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.189465 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tfk4v\" (UniqueName: \"kubernetes.io/projected/471bd5c8-e8c1-4078-8e32-c0c30a1b237b-kube-api-access-tfk4v\") pod \"etcd-operator-b45778765-lpjkw\" (UID: \"471bd5c8-e8c1-4078-8e32-c0c30a1b237b\") " pod="openshift-etcd-operator/etcd-operator-b45778765-lpjkw" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.189493 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q8whm\" (UniqueName: \"kubernetes.io/projected/e491ff79-3c18-4cf1-9e8b-3216fc5b2e7e-kube-api-access-q8whm\") pod \"machine-config-operator-74547568cd-j4xlb\" (UID: \"e491ff79-3c18-4cf1-9e8b-3216fc5b2e7e\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-j4xlb" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.189527 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/fd8ff4f5-94ee-4772-b6d8-d0d28d433891-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-bmd6j\" (UID: \"fd8ff4f5-94ee-4772-b6d8-d0d28d433891\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-bmd6j" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.189551 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/e491ff79-3c18-4cf1-9e8b-3216fc5b2e7e-auth-proxy-config\") pod \"machine-config-operator-74547568cd-j4xlb\" (UID: \"e491ff79-3c18-4cf1-9e8b-3216fc5b2e7e\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-j4xlb" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.189573 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/7a2fedca-4a36-42ec-a6b6-63fcd2de6c07-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-6b2dk\" (UID: \"7a2fedca-4a36-42ec-a6b6-63fcd2de6c07\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6b2dk" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.189597 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/471bd5c8-e8c1-4078-8e32-c0c30a1b237b-serving-cert\") pod \"etcd-operator-b45778765-lpjkw\" (UID: \"471bd5c8-e8c1-4078-8e32-c0c30a1b237b\") " pod="openshift-etcd-operator/etcd-operator-b45778765-lpjkw" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.189617 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/c612cdf9-575c-4d80-9a48-93e32d5673ae-plugins-dir\") pod \"csi-hostpathplugin-gntl4\" (UID: \"c612cdf9-575c-4d80-9a48-93e32d5673ae\") " pod="hostpath-provisioner/csi-hostpathplugin-gntl4" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.189648 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bk48h\" (UniqueName: \"kubernetes.io/projected/c612cdf9-575c-4d80-9a48-93e32d5673ae-kube-api-access-bk48h\") pod \"csi-hostpathplugin-gntl4\" (UID: \"c612cdf9-575c-4d80-9a48-93e32d5673ae\") " pod="hostpath-provisioner/csi-hostpathplugin-gntl4" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.189681 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bsp9t\" (UniqueName: \"kubernetes.io/projected/b1377377-5ec9-4ff9-8550-aa1ae603a8cf-kube-api-access-bsp9t\") pod \"ingress-canary-2kbdk\" (UID: \"b1377377-5ec9-4ff9-8550-aa1ae603a8cf\") " pod="openshift-ingress-canary/ingress-canary-2kbdk" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.189706 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/40dd9f5b-066f-4400-a4a1-6a7a9eda8c90-profile-collector-cert\") pod \"catalog-operator-68c6474976-79sb7\" (UID: \"40dd9f5b-066f-4400-a4a1-6a7a9eda8c90\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-79sb7" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.190475 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/e491ff79-3c18-4cf1-9e8b-3216fc5b2e7e-auth-proxy-config\") pod \"machine-config-operator-74547568cd-j4xlb\" (UID: \"e491ff79-3c18-4cf1-9e8b-3216fc5b2e7e\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-j4xlb" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.191422 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/c612cdf9-575c-4d80-9a48-93e32d5673ae-plugins-dir\") pod \"csi-hostpathplugin-gntl4\" (UID: \"c612cdf9-575c-4d80-9a48-93e32d5673ae\") " pod="hostpath-provisioner/csi-hostpathplugin-gntl4" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.192067 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/7a2fedca-4a36-42ec-a6b6-63fcd2de6c07-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-6b2dk\" (UID: \"7a2fedca-4a36-42ec-a6b6-63fcd2de6c07\") " 
pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6b2dk" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.192323 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/e491ff79-3c18-4cf1-9e8b-3216fc5b2e7e-images\") pod \"machine-config-operator-74547568cd-j4xlb\" (UID: \"e491ff79-3c18-4cf1-9e8b-3216fc5b2e7e\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-j4xlb" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.192934 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/8fd9a883-0e5e-424f-bc31-199f2103c548-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-8dvz9\" (UID: \"8fd9a883-0e5e-424f-bc31-199f2103c548\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-8dvz9" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.193598 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fd8ff4f5-94ee-4772-b6d8-d0d28d433891-config\") pod \"kube-controller-manager-operator-78b949d7b-bmd6j\" (UID: \"fd8ff4f5-94ee-4772-b6d8-d0d28d433891\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-bmd6j" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.194961 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/e491ff79-3c18-4cf1-9e8b-3216fc5b2e7e-proxy-tls\") pod \"machine-config-operator-74547568cd-j4xlb\" (UID: \"e491ff79-3c18-4cf1-9e8b-3216fc5b2e7e\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-j4xlb" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.196764 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/471bd5c8-e8c1-4078-8e32-c0c30a1b237b-serving-cert\") pod \"etcd-operator-b45778765-lpjkw\" (UID: \"471bd5c8-e8c1-4078-8e32-c0c30a1b237b\") " pod="openshift-etcd-operator/etcd-operator-b45778765-lpjkw" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.198370 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/330eaab5-20d9-4b5c-a8f2-8bed8d2df7b7-serving-cert\") pod \"service-ca-operator-777779d784-kv2r9\" (UID: \"330eaab5-20d9-4b5c-a8f2-8bed8d2df7b7\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-kv2r9" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.198812 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/3cea010a-6482-47b3-9c79-45432b921956-webhook-cert\") pod \"packageserver-d55dfcdfc-8kz2s\" (UID: \"3cea010a-6482-47b3-9c79-45432b921956\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-8kz2s" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.202949 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.203821 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/368534d6-5359-447b-afd1-2a1c16c0c5d8-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-779px\" 
(UID: \"368534d6-5359-447b-afd1-2a1c16c0c5d8\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-779px" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.204386 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/91845c42-ac54-4831-b8ac-73737902b703-console-serving-cert\") pod \"console-f9d7485db-mql86\" (UID: \"91845c42-ac54-4831-b8ac-73737902b703\") " pod="openshift-console/console-f9d7485db-mql86" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.204609 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/df61f830-b312-4a01-8d17-057799312936-registry-tls\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.206016 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ltcjq" event={"ID":"ddba54c6-0a85-4055-bc93-89e961961cd7","Type":"ContainerStarted","Data":"ff7b2cba184ee6b0298741d6a53d5ea1d4f971ff8ee4f821cda4de972c1d967d"} Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.207041 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8e54e1ee-a1b8-454c-8b09-e252ca3fe9da-metrics-certs\") pod \"router-default-5444994796-jpxd9\" (UID: \"8e54e1ee-a1b8-454c-8b09-e252ca3fe9da\") " pod="openshift-ingress/router-default-5444994796-jpxd9" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.207870 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/91845c42-ac54-4831-b8ac-73737902b703-trusted-ca-bundle\") pod \"console-f9d7485db-mql86\" (UID: \"91845c42-ac54-4831-b8ac-73737902b703\") " pod="openshift-console/console-f9d7485db-mql86" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.208141 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/279ec67d-fb6e-49a2-8339-854132f4e3ab-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-fp6bq\" (UID: \"279ec67d-fb6e-49a2-8339-854132f4e3ab\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-fp6bq" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.216034 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/40dd9f5b-066f-4400-a4a1-6a7a9eda8c90-profile-collector-cert\") pod \"catalog-operator-68c6474976-79sb7\" (UID: \"40dd9f5b-066f-4400-a4a1-6a7a9eda8c90\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-79sb7" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.223336 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.260748 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b41a4b6f-931f-41d8-86b4-a9f1193eb48a-srv-cert\") pod \"olm-operator-6b444d44fb-v88cc\" (UID: \"b41a4b6f-931f-41d8-86b4-a9f1193eb48a\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v88cc" Jan 29 12:08:17 crc 
kubenswrapper[4753]: I0129 12:08:17.291102 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.291751 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b41a4b6f-931f-41d8-86b4-a9f1193eb48a-profile-collector-cert\") pod \"olm-operator-6b444d44fb-v88cc\" (UID: \"b41a4b6f-931f-41d8-86b4-a9f1193eb48a\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v88cc"
Jan 29 12:08:17 crc kubenswrapper[4753]: E0129 12:08:17.291832 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:17.791782423 +0000 UTC m=+112.043863878 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.291907 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/6fa51f84-9cf2-491a-bc13-0b2c4b48c5c0-signing-cabundle\") pod \"service-ca-9c57cc56f-zm8vw\" (UID: \"6fa51f84-9cf2-491a-bc13-0b2c4b48c5c0\") " pod="openshift-service-ca/service-ca-9c57cc56f-zm8vw"
Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.291975 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4cwgg\" (UniqueName: \"kubernetes.io/projected/4f48974c-8787-482a-b962-f7646e12952e-kube-api-access-4cwgg\") pod \"collect-profiles-29494800-86kw2\" (UID: \"4f48974c-8787-482a-b962-f7646e12952e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494800-86kw2"
Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.292016 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/d379e39e-ec0e-4419-8ad9-3deae28a5729-certs\") pod \"machine-config-server-26486\" (UID: \"d379e39e-ec0e-4419-8ad9-3deae28a5729\") " pod="openshift-machine-config-operator/machine-config-server-26486"
Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.292058 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/b6e8630c-f357-4c2e-88d0-45218ea79981-metrics-tls\") pod \"ingress-operator-5b745b69d9-4bpqt\" (UID: \"b6e8630c-f357-4c2e-88d0-45218ea79981\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4bpqt"
Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.292096 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-spxzk\" (UniqueName: \"kubernetes.io/projected/e63ba05b-c0ce-43bc-b660-f5d93a3c45a5-kube-api-access-spxzk\") pod \"dns-default-jrblr\" (UID: \"e63ba05b-c0ce-43bc-b660-f5d93a3c45a5\") " pod="openshift-dns/dns-default-jrblr"
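Both CSI failures in this log, the MountVolume.MountDevice at 12:08:17.164865 and the UnmountVolume.TearDown above, are parked by nestedpendingoperations with "No retries permitted until ... (durationBeforeRetry 500ms)": each consecutive failure of the same volume operation doubles the wait before the next attempt is allowed. The 500ms initial delay is straight from the log; the doubling and the roughly 2m2s ceiling are kubelet's exponential-backoff behavior and are assumed values in this sketch:

package main

import (
	"fmt"
	"time"
)

const (
	initialDurationBeforeRetry = 500 * time.Millisecond        // matches "(durationBeforeRetry 500ms)"
	maxDurationBeforeRetry     = 2*time.Minute + 2*time.Second // assumed cap
)

// nextBackoff doubles the retry delay on each failure, up to the cap.
func nextBackoff(current time.Duration) time.Duration {
	if current == 0 {
		return initialDurationBeforeRetry
	}
	next := 2 * current
	if next > maxDurationBeforeRetry {
		next = maxDurationBeforeRetry
	}
	return next
}

func main() {
	d := time.Duration(0)
	for i := 1; i <= 10; i++ {
		d = nextBackoff(d)
		fmt.Printf("failure %2d: no retries permitted for %v\n", i, d)
	}
}

In this trace the backoff never escalates: once the hostpath plugin registers, the first retry at +500ms succeeds and the counter resets.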
\"kubernetes.io/projected/e63ba05b-c0ce-43bc-b660-f5d93a3c45a5-kube-api-access-spxzk\") pod \"dns-default-jrblr\" (UID: \"e63ba05b-c0ce-43bc-b660-f5d93a3c45a5\") " pod="openshift-dns/dns-default-jrblr" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.292174 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/3cea010a-6482-47b3-9c79-45432b921956-tmpfs\") pod \"packageserver-d55dfcdfc-8kz2s\" (UID: \"3cea010a-6482-47b3-9c79-45432b921956\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-8kz2s" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.292291 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.292815 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/471bd5c8-e8c1-4078-8e32-c0c30a1b237b-etcd-ca\") pod \"etcd-operator-b45778765-lpjkw\" (UID: \"471bd5c8-e8c1-4078-8e32-c0c30a1b237b\") " pod="openshift-etcd-operator/etcd-operator-b45778765-lpjkw" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.292883 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b6e8630c-f357-4c2e-88d0-45218ea79981-bound-sa-token\") pod \"ingress-operator-5b745b69d9-4bpqt\" (UID: \"b6e8630c-f357-4c2e-88d0-45218ea79981\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4bpqt" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.292938 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/7a2fedca-4a36-42ec-a6b6-63fcd2de6c07-proxy-tls\") pod \"machine-config-controller-84d6567774-6b2dk\" (UID: \"7a2fedca-4a36-42ec-a6b6-63fcd2de6c07\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6b2dk" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.292972 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d2148dab-1475-49ad-b154-99cd20f15125-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-6xpng\" (UID: \"d2148dab-1475-49ad-b154-99cd20f15125\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-6xpng" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.293039 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/96da6032-b04a-4eef-8170-a57f2f9b00ba-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-nbdct\" (UID: \"96da6032-b04a-4eef-8170-a57f2f9b00ba\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-nbdct" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.293077 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/8e54e1ee-a1b8-454c-8b09-e252ca3fe9da-default-certificate\") pod \"router-default-5444994796-jpxd9\" (UID: \"8e54e1ee-a1b8-454c-8b09-e252ca3fe9da\") " 
pod="openshift-ingress/router-default-5444994796-jpxd9" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.293123 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hndgf\" (UniqueName: \"kubernetes.io/projected/368534d6-5359-447b-afd1-2a1c16c0c5d8-kube-api-access-hndgf\") pod \"package-server-manager-789f6589d5-779px\" (UID: \"368534d6-5359-447b-afd1-2a1c16c0c5d8\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-779px" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.293186 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wtgvz\" (UniqueName: \"kubernetes.io/projected/279ec67d-fb6e-49a2-8339-854132f4e3ab-kube-api-access-wtgvz\") pod \"kube-storage-version-migrator-operator-b67b599dd-fp6bq\" (UID: \"279ec67d-fb6e-49a2-8339-854132f4e3ab\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-fp6bq" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.293239 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4f48974c-8787-482a-b962-f7646e12952e-config-volume\") pod \"collect-profiles-29494800-86kw2\" (UID: \"4f48974c-8787-482a-b962-f7646e12952e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494800-86kw2" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.293275 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4f48974c-8787-482a-b962-f7646e12952e-secret-volume\") pod \"collect-profiles-29494800-86kw2\" (UID: \"4f48974c-8787-482a-b962-f7646e12952e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494800-86kw2" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.293312 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6e8630c-f357-4c2e-88d0-45218ea79981-trusted-ca\") pod \"ingress-operator-5b745b69d9-4bpqt\" (UID: \"b6e8630c-f357-4c2e-88d0-45218ea79981\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4bpqt" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.293359 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/3cea010a-6482-47b3-9c79-45432b921956-apiservice-cert\") pod \"packageserver-d55dfcdfc-8kz2s\" (UID: \"3cea010a-6482-47b3-9c79-45432b921956\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-8kz2s" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.293431 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e63ba05b-c0ce-43bc-b660-f5d93a3c45a5-config-volume\") pod \"dns-default-jrblr\" (UID: \"e63ba05b-c0ce-43bc-b660-f5d93a3c45a5\") " pod="openshift-dns/dns-default-jrblr" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.293472 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d2148dab-1475-49ad-b154-99cd20f15125-config\") pod \"kube-apiserver-operator-766d6c64bb-6xpng\" (UID: \"d2148dab-1475-49ad-b154-99cd20f15125\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-6xpng" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.293512 4753 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/40dd9f5b-066f-4400-a4a1-6a7a9eda8c90-srv-cert\") pod \"catalog-operator-68c6474976-79sb7\" (UID: \"40dd9f5b-066f-4400-a4a1-6a7a9eda8c90\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-79sb7" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.293562 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sxd8k\" (UniqueName: \"kubernetes.io/projected/b41a4b6f-931f-41d8-86b4-a9f1193eb48a-kube-api-access-sxd8k\") pod \"olm-operator-6b444d44fb-v88cc\" (UID: \"b41a4b6f-931f-41d8-86b4-a9f1193eb48a\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v88cc" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.293596 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zpwp6\" (UniqueName: \"kubernetes.io/projected/8e54e1ee-a1b8-454c-8b09-e252ca3fe9da-kube-api-access-zpwp6\") pod \"router-default-5444994796-jpxd9\" (UID: \"8e54e1ee-a1b8-454c-8b09-e252ca3fe9da\") " pod="openshift-ingress/router-default-5444994796-jpxd9" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.293618 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/96da6032-b04a-4eef-8170-a57f2f9b00ba-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-nbdct\" (UID: \"96da6032-b04a-4eef-8170-a57f2f9b00ba\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-nbdct" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.293653 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kpq58\" (UniqueName: \"kubernetes.io/projected/8fd9a883-0e5e-424f-bc31-199f2103c548-kube-api-access-kpq58\") pod \"control-plane-machine-set-operator-78cbb6b69f-8dvz9\" (UID: \"8fd9a883-0e5e-424f-bc31-199f2103c548\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-8dvz9" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.293702 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f5dcg\" (UniqueName: \"kubernetes.io/projected/d379e39e-ec0e-4419-8ad9-3deae28a5729-kube-api-access-f5dcg\") pod \"machine-config-server-26486\" (UID: \"d379e39e-ec0e-4419-8ad9-3deae28a5729\") " pod="openshift-machine-config-operator/machine-config-server-26486" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.293777 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/471bd5c8-e8c1-4078-8e32-c0c30a1b237b-etcd-service-ca\") pod \"etcd-operator-b45778765-lpjkw\" (UID: \"471bd5c8-e8c1-4078-8e32-c0c30a1b237b\") " pod="openshift-etcd-operator/etcd-operator-b45778765-lpjkw" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.293815 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/471bd5c8-e8c1-4078-8e32-c0c30a1b237b-etcd-client\") pod \"etcd-operator-b45778765-lpjkw\" (UID: \"471bd5c8-e8c1-4078-8e32-c0c30a1b237b\") " pod="openshift-etcd-operator/etcd-operator-b45778765-lpjkw" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.293867 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8t92k\" (UniqueName: 
\"kubernetes.io/projected/dad90295-65db-470c-8041-19fcf86d0439-kube-api-access-8t92k\") pod \"marketplace-operator-79b997595-lswml\" (UID: \"dad90295-65db-470c-8041-19fcf86d0439\") " pod="openshift-marketplace/marketplace-operator-79b997595-lswml" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.293933 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4ghtk\" (UniqueName: \"kubernetes.io/projected/3cea010a-6482-47b3-9c79-45432b921956-kube-api-access-4ghtk\") pod \"packageserver-d55dfcdfc-8kz2s\" (UID: \"3cea010a-6482-47b3-9c79-45432b921956\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-8kz2s" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.293977 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dpx95\" (UniqueName: \"kubernetes.io/projected/7a2fedca-4a36-42ec-a6b6-63fcd2de6c07-kube-api-access-dpx95\") pod \"machine-config-controller-84d6567774-6b2dk\" (UID: \"7a2fedca-4a36-42ec-a6b6-63fcd2de6c07\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6b2dk" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.294125 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bjvsq\" (UniqueName: \"kubernetes.io/projected/8fe1a105-96fa-4847-97a5-3d83d04a86ec-kube-api-access-bjvsq\") pod \"migrator-59844c95c7-gkf8g\" (UID: \"8fe1a105-96fa-4847-97a5-3d83d04a86ec\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-gkf8g" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.294170 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8e54e1ee-a1b8-454c-8b09-e252ca3fe9da-service-ca-bundle\") pod \"router-default-5444994796-jpxd9\" (UID: \"8e54e1ee-a1b8-454c-8b09-e252ca3fe9da\") " pod="openshift-ingress/router-default-5444994796-jpxd9" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.294207 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/330eaab5-20d9-4b5c-a8f2-8bed8d2df7b7-config\") pod \"service-ca-operator-777779d784-kv2r9\" (UID: \"330eaab5-20d9-4b5c-a8f2-8bed8d2df7b7\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-kv2r9" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.294279 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/8e54e1ee-a1b8-454c-8b09-e252ca3fe9da-stats-auth\") pod \"router-default-5444994796-jpxd9\" (UID: \"8e54e1ee-a1b8-454c-8b09-e252ca3fe9da\") " pod="openshift-ingress/router-default-5444994796-jpxd9" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.294321 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/c612cdf9-575c-4d80-9a48-93e32d5673ae-socket-dir\") pod \"csi-hostpathplugin-gntl4\" (UID: \"c612cdf9-575c-4d80-9a48-93e32d5673ae\") " pod="hostpath-provisioner/csi-hostpathplugin-gntl4" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.294358 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rx9j4\" (UniqueName: \"kubernetes.io/projected/d8fa6dd3-8d5a-4518-a95a-d1782279340d-kube-api-access-rx9j4\") pod \"multus-admission-controller-857f4d67dd-gln66\" (UID: \"d8fa6dd3-8d5a-4518-a95a-d1782279340d\") " 
pod="openshift-multus/multus-admission-controller-857f4d67dd-gln66" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.295771 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/471bd5c8-e8c1-4078-8e32-c0c30a1b237b-etcd-ca\") pod \"etcd-operator-b45778765-lpjkw\" (UID: \"471bd5c8-e8c1-4078-8e32-c0c30a1b237b\") " pod="openshift-etcd-operator/etcd-operator-b45778765-lpjkw" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.296143 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/471bd5c8-e8c1-4078-8e32-c0c30a1b237b-etcd-service-ca\") pod \"etcd-operator-b45778765-lpjkw\" (UID: \"471bd5c8-e8c1-4078-8e32-c0c30a1b237b\") " pod="openshift-etcd-operator/etcd-operator-b45778765-lpjkw" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.314767 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/3cea010a-6482-47b3-9c79-45432b921956-tmpfs\") pod \"packageserver-d55dfcdfc-8kz2s\" (UID: \"3cea010a-6482-47b3-9c79-45432b921956\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-8kz2s" Jan 29 12:08:17 crc kubenswrapper[4753]: E0129 12:08:17.321743 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:17.821705407 +0000 UTC m=+112.073786862 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.330779 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d2148dab-1475-49ad-b154-99cd20f15125-config\") pod \"kube-apiserver-operator-766d6c64bb-6xpng\" (UID: \"d2148dab-1475-49ad-b154-99cd20f15125\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-6xpng" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.336415 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/3cea010a-6482-47b3-9c79-45432b921956-apiservice-cert\") pod \"packageserver-d55dfcdfc-8kz2s\" (UID: \"3cea010a-6482-47b3-9c79-45432b921956\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-8kz2s" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.364444 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b41a4b6f-931f-41d8-86b4-a9f1193eb48a-profile-collector-cert\") pod \"olm-operator-6b444d44fb-v88cc\" (UID: \"b41a4b6f-931f-41d8-86b4-a9f1193eb48a\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v88cc" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.364532 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/96da6032-b04a-4eef-8170-a57f2f9b00ba-config\") pod 
\"openshift-kube-scheduler-operator-5fdd9b5758-nbdct\" (UID: \"96da6032-b04a-4eef-8170-a57f2f9b00ba\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-nbdct" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.589078 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6e8630c-f357-4c2e-88d0-45218ea79981-trusted-ca\") pod \"ingress-operator-5b745b69d9-4bpqt\" (UID: \"b6e8630c-f357-4c2e-88d0-45218ea79981\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4bpqt" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.593865 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d2148dab-1475-49ad-b154-99cd20f15125-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-6xpng\" (UID: \"d2148dab-1475-49ad-b154-99cd20f15125\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-6xpng" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.594500 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/7a2fedca-4a36-42ec-a6b6-63fcd2de6c07-proxy-tls\") pod \"machine-config-controller-84d6567774-6b2dk\" (UID: \"7a2fedca-4a36-42ec-a6b6-63fcd2de6c07\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6b2dk" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.595062 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4f48974c-8787-482a-b962-f7646e12952e-secret-volume\") pod \"collect-profiles-29494800-86kw2\" (UID: \"4f48974c-8787-482a-b962-f7646e12952e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494800-86kw2" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.598844 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.599621 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/471bd5c8-e8c1-4078-8e32-c0c30a1b237b-etcd-client\") pod \"etcd-operator-b45778765-lpjkw\" (UID: \"471bd5c8-e8c1-4078-8e32-c0c30a1b237b\") " pod="openshift-etcd-operator/etcd-operator-b45778765-lpjkw" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.599988 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.600477 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.602507 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.607263 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/b6e8630c-f357-4c2e-88d0-45218ea79981-metrics-tls\") pod \"ingress-operator-5b745b69d9-4bpqt\" (UID: \"b6e8630c-f357-4c2e-88d0-45218ea79981\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4bpqt" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.608249 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: 
\"kubernetes.io/secret/40dd9f5b-066f-4400-a4a1-6a7a9eda8c90-srv-cert\") pod \"catalog-operator-68c6474976-79sb7\" (UID: \"40dd9f5b-066f-4400-a4a1-6a7a9eda8c90\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-79sb7" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.609305 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/6fa51f84-9cf2-491a-bc13-0b2c4b48c5c0-signing-cabundle\") pod \"service-ca-9c57cc56f-zm8vw\" (UID: \"6fa51f84-9cf2-491a-bc13-0b2c4b48c5c0\") " pod="openshift-service-ca/service-ca-9c57cc56f-zm8vw" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.610279 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/330eaab5-20d9-4b5c-a8f2-8bed8d2df7b7-config\") pod \"service-ca-operator-777779d784-kv2r9\" (UID: \"330eaab5-20d9-4b5c-a8f2-8bed8d2df7b7\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-kv2r9" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.611434 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8e54e1ee-a1b8-454c-8b09-e252ca3fe9da-service-ca-bundle\") pod \"router-default-5444994796-jpxd9\" (UID: \"8e54e1ee-a1b8-454c-8b09-e252ca3fe9da\") " pod="openshift-ingress/router-default-5444994796-jpxd9" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.611522 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.612070 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.612339 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/c612cdf9-575c-4d80-9a48-93e32d5673ae-socket-dir\") pod \"csi-hostpathplugin-gntl4\" (UID: \"c612cdf9-575c-4d80-9a48-93e32d5673ae\") " pod="hostpath-provisioner/csi-hostpathplugin-gntl4" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.612552 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.614100 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:17 crc kubenswrapper[4753]: E0129 12:08:17.614726 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:18.114700258 +0000 UTC m=+112.366781723 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.621172 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/8e54e1ee-a1b8-454c-8b09-e252ca3fe9da-stats-auth\") pod \"router-default-5444994796-jpxd9\" (UID: \"8e54e1ee-a1b8-454c-8b09-e252ca3fe9da\") " pod="openshift-ingress/router-default-5444994796-jpxd9" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.624428 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:17 crc kubenswrapper[4753]: E0129 12:08:17.625785 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:18.125766146 +0000 UTC m=+112.377847601 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.626504 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.626954 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.627124 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.629121 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.629364 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.629690 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.630627 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.630656 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" 
(UniqueName: \"kubernetes.io/secret/8e54e1ee-a1b8-454c-8b09-e252ca3fe9da-default-certificate\") pod \"router-default-5444994796-jpxd9\" (UID: \"8e54e1ee-a1b8-454c-8b09-e252ca3fe9da\") " pod="openshift-ingress/router-default-5444994796-jpxd9" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.632183 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.632560 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.632914 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.635276 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/dad90295-65db-470c-8041-19fcf86d0439-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-lswml\" (UID: \"dad90295-65db-470c-8041-19fcf86d0439\") " pod="openshift-marketplace/marketplace-operator-79b997595-lswml" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.637092 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.637290 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4f48974c-8787-482a-b962-f7646e12952e-config-volume\") pod \"collect-profiles-29494800-86kw2\" (UID: \"4f48974c-8787-482a-b962-f7646e12952e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494800-86kw2" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.638901 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.639468 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.643076 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/dad90295-65db-470c-8041-19fcf86d0439-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-lswml\" (UID: \"dad90295-65db-470c-8041-19fcf86d0439\") " pod="openshift-marketplace/marketplace-operator-79b997595-lswml" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.643295 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.647330 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/d8fa6dd3-8d5a-4518-a95a-d1782279340d-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-gln66\" (UID: \"d8fa6dd3-8d5a-4518-a95a-d1782279340d\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-gln66" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.648244 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/d379e39e-ec0e-4419-8ad9-3deae28a5729-certs\") pod \"machine-config-server-26486\" (UID: 
\"d379e39e-ec0e-4419-8ad9-3deae28a5729\") " pod="openshift-machine-config-operator/machine-config-server-26486" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.658667 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/6fa51f84-9cf2-491a-bc13-0b2c4b48c5c0-signing-key\") pod \"service-ca-9c57cc56f-zm8vw\" (UID: \"6fa51f84-9cf2-491a-bc13-0b2c4b48c5c0\") " pod="openshift-service-ca/service-ca-9c57cc56f-zm8vw" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.662020 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.664119 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/d379e39e-ec0e-4419-8ad9-3deae28a5729-node-bootstrap-token\") pod \"machine-config-server-26486\" (UID: \"d379e39e-ec0e-4419-8ad9-3deae28a5729\") " pod="openshift-machine-config-operator/machine-config-server-26486" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.674747 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b1377377-5ec9-4ff9-8550-aa1ae603a8cf-cert\") pod \"ingress-canary-2kbdk\" (UID: \"b1377377-5ec9-4ff9-8550-aa1ae603a8cf\") " pod="openshift-ingress-canary/ingress-canary-2kbdk" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.682611 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.689795 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e63ba05b-c0ce-43bc-b660-f5d93a3c45a5-config-volume\") pod \"dns-default-jrblr\" (UID: \"e63ba05b-c0ce-43bc-b660-f5d93a3c45a5\") " pod="openshift-dns/dns-default-jrblr" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.702007 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.721997 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.727843 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:17 crc kubenswrapper[4753]: E0129 12:08:17.727999 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:18.227968327 +0000 UTC m=+112.480049782 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.733446 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/e63ba05b-c0ce-43bc-b660-f5d93a3c45a5-metrics-tls\") pod \"dns-default-jrblr\" (UID: \"e63ba05b-c0ce-43bc-b660-f5d93a3c45a5\") " pod="openshift-dns/dns-default-jrblr" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.734391 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:17 crc kubenswrapper[4753]: E0129 12:08:17.734940 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:18.234923232 +0000 UTC m=+112.487004677 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.783076 4753 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.836088 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:17 crc kubenswrapper[4753]: E0129 12:08:17.837134 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:18.337104523 +0000 UTC m=+112.589185978 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.844656 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.877446 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-nrt2l"] Jan 29 12:08:17 crc kubenswrapper[4753]: I0129 12:08:17.939221 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:17 crc kubenswrapper[4753]: E0129 12:08:17.939988 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:18.439970604 +0000 UTC m=+112.692052059 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.018763 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-255vg"] Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.035197 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-w5dnh"] Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.040892 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:18 crc kubenswrapper[4753]: E0129 12:08:18.041660 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:18.541642099 +0000 UTC m=+112.793723554 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.045274 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qwnwx\" (UniqueName: \"kubernetes.io/projected/3519d984-8bed-45ca-9e6d-687bec69ee24-kube-api-access-qwnwx\") pod \"cluster-samples-operator-665b6dd947-g5td2\" (UID: \"3519d984-8bed-45ca-9e6d-687bec69ee24\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-g5td2" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.102045 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-g5td2" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.110508 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.116677 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.119208 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Jan 29 12:08:18 crc kubenswrapper[4753]: W0129 12:08:18.125772 4753 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod43f6f4f5_8e79_4142_95ee_84b051b27cf3.slice/crio-28168f49ef1c51a88dbf7f30780f0a762dfdb52324146d78470e09415ceacc78 WatchSource:0}: Error finding container 28168f49ef1c51a88dbf7f30780f0a762dfdb52324146d78470e09415ceacc78: Status 404 returned error can't find the container with id 28168f49ef1c51a88dbf7f30780f0a762dfdb52324146d78470e09415ceacc78 Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.143214 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:18 crc kubenswrapper[4753]: E0129 12:08:18.145809 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:18.645782108 +0000 UTC m=+112.897863573 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.153212 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-5wmqw"] Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.179603 4753 request.go:700] Waited for 1.014291464s due to client-side throttling, not priority and fairness, request: POST:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console/serviceaccounts/default/token Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.180758 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wwrp4\" (UniqueName: \"kubernetes.io/projected/40dd9f5b-066f-4400-a4a1-6a7a9eda8c90-kube-api-access-wwrp4\") pod \"catalog-operator-68c6474976-79sb7\" (UID: \"40dd9f5b-066f-4400-a4a1-6a7a9eda8c90\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-79sb7" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.181123 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kpjl9\" (UniqueName: \"kubernetes.io/projected/91845c42-ac54-4831-b8ac-73737902b703-kube-api-access-kpjl9\") pod \"console-f9d7485db-mql86\" (UID: \"91845c42-ac54-4831-b8ac-73737902b703\") " pod="openshift-console/console-f9d7485db-mql86" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.181191 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q4hkh\" (UniqueName: \"kubernetes.io/projected/6a22bc59-6a04-49b4-aa79-225e9571ee71-kube-api-access-q4hkh\") pod \"route-controller-manager-6576b87f9c-dx8db\" (UID: \"6a22bc59-6a04-49b4-aa79-225e9571ee71\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dx8db" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.181200 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/df61f830-b312-4a01-8d17-057799312936-bound-sa-token\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.181749 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q4nmh\" (UniqueName: \"kubernetes.io/projected/3628b07e-d247-4487-8095-821bf56656b8-kube-api-access-q4nmh\") pod \"controller-manager-879f6c89f-86kqx\" (UID: \"3628b07e-d247-4487-8095-821bf56656b8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-86kqx" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.181785 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sk4v8\" (UniqueName: \"kubernetes.io/projected/e0c82664-4c0e-4f03-858f-dcf68539f92a-kube-api-access-sk4v8\") pod \"openshift-controller-manager-operator-756b6f6bc6-qkqfh\" (UID: \"e0c82664-4c0e-4f03-858f-dcf68539f92a\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-qkqfh" Jan 29 12:08:18 crc kubenswrapper[4753]: 
I0129 12:08:18.181860 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5mwvb\" (UniqueName: \"kubernetes.io/projected/6912f3bb-d8bc-4dc1-b3f0-9e7e1e8d354a-kube-api-access-5mwvb\") pod \"apiserver-7bbb656c7d-p7d5d\" (UID: \"6912f3bb-d8bc-4dc1-b3f0-9e7e1e8d354a\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-p7d5d" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.182100 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7kdx9\" (UniqueName: \"kubernetes.io/projected/6fa51f84-9cf2-491a-bc13-0b2c4b48c5c0-kube-api-access-7kdx9\") pod \"service-ca-9c57cc56f-zm8vw\" (UID: \"6fa51f84-9cf2-491a-bc13-0b2c4b48c5c0\") " pod="openshift-service-ca/service-ca-9c57cc56f-zm8vw" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.182107 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-766r8\" (UniqueName: \"kubernetes.io/projected/df61f830-b312-4a01-8d17-057799312936-kube-api-access-766r8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.182502 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9fdsb\" (UniqueName: \"kubernetes.io/projected/9728fd7e-6203-4082-9297-2d3fd9e17b74-kube-api-access-9fdsb\") pod \"oauth-openshift-558db77b4-zdhjn\" (UID: \"9728fd7e-6203-4082-9297-2d3fd9e17b74\") " pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.183159 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-47qr2\" (UniqueName: \"kubernetes.io/projected/e1a92560-535a-4f2c-9ce6-9940bfc1bf37-kube-api-access-47qr2\") pod \"openshift-apiserver-operator-796bbdcf4f-rwlht\" (UID: \"e1a92560-535a-4f2c-9ce6-9940bfc1bf37\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-rwlht" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.183187 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dds5m\" (UniqueName: \"kubernetes.io/projected/2d66d4a7-6db6-438c-9d2b-cb2bb0ef846b-kube-api-access-dds5m\") pod \"machine-api-operator-5694c8668f-s4nq8\" (UID: \"2d66d4a7-6db6-438c-9d2b-cb2bb0ef846b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-s4nq8" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.184166 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pfjfd\" (UniqueName: \"kubernetes.io/projected/f9d4b340-8ceb-41e1-8289-f48222e5d3d8-kube-api-access-pfjfd\") pod \"console-operator-58897d9998-znsp6\" (UID: \"f9d4b340-8ceb-41e1-8289-f48222e5d3d8\") " pod="openshift-console-operator/console-operator-58897d9998-znsp6" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.189297 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d2148dab-1475-49ad-b154-99cd20f15125-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-6xpng\" (UID: \"d2148dab-1475-49ad-b154-99cd20f15125\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-6xpng" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.202098 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9rhnp\" (UniqueName: 
\"kubernetes.io/projected/200ffbfe-dff4-45e2-944d-34a3ad56f018-kube-api-access-9rhnp\") pod \"downloads-7954f5f757-b2rb9\" (UID: \"200ffbfe-dff4-45e2-944d-34a3ad56f018\") " pod="openshift-console/downloads-7954f5f757-b2rb9" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.219268 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-5wmqw" event={"ID":"b3a97911-76f6-4098-91b6-a67368bcac8e","Type":"ContainerStarted","Data":"938ca6783af10f7c4172c520b121cbe7b6620b5d75e51b39985fcd7798030ef5"} Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.222641 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xlt8c\" (UniqueName: \"kubernetes.io/projected/330eaab5-20d9-4b5c-a8f2-8bed8d2df7b7-kube-api-access-xlt8c\") pod \"service-ca-operator-777779d784-kv2r9\" (UID: \"330eaab5-20d9-4b5c-a8f2-8bed8d2df7b7\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-kv2r9" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.225215 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-nrt2l" event={"ID":"86659644-dec3-4e1a-ba32-5b4487a2f4c8","Type":"ContainerStarted","Data":"31c05a4181d5b5290d3c772d6ef48fc391eb3c90ffdc8b912468c3b7c052bbef"} Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.229507 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-s4nq8" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.234112 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-f6z5s"] Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.236913 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-w5dnh" event={"ID":"887a7f51-26e3-4d7e-99ff-884f7389171a","Type":"ContainerStarted","Data":"0955b8a96b2ae73e3adc7b7c64cc34507149a645083f35c7abfb5ea0dc4a3d75"} Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.237338 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-6xpng" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.239922 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ltcjq" event={"ID":"ddba54c6-0a85-4055-bc93-89e961961cd7","Type":"ContainerStarted","Data":"d7194a6caf13fab26a33bb13e2522a59d46ef59d189a0874f4424255964af9ea"} Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.240885 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dx8db" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.244354 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:18 crc kubenswrapper[4753]: E0129 12:08:18.244505 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2026-01-29 12:08:18.744481465 +0000 UTC m=+112.996562930 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.244987 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:18 crc kubenswrapper[4753]: E0129 12:08:18.245502 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:18.745489625 +0000 UTC m=+112.997571080 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.247550 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-255vg" event={"ID":"43f6f4f5-8e79-4142-95ee-84b051b27cf3","Type":"ContainerStarted","Data":"28168f49ef1c51a88dbf7f30780f0a762dfdb52324146d78470e09415ceacc78"} Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.260210 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/fd8ff4f5-94ee-4772-b6d8-d0d28d433891-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-bmd6j\" (UID: \"fd8ff4f5-94ee-4772-b6d8-d0d28d433891\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-bmd6j" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.264102 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c2q59\" (UniqueName: \"kubernetes.io/projected/b6e8630c-f357-4c2e-88d0-45218ea79981-kube-api-access-c2q59\") pod \"ingress-operator-5b745b69d9-4bpqt\" (UID: \"b6e8630c-f357-4c2e-88d0-45218ea79981\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4bpqt" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.264629 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-bmd6j" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.277067 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bk48h\" (UniqueName: \"kubernetes.io/projected/c612cdf9-575c-4d80-9a48-93e32d5673ae-kube-api-access-bk48h\") pod \"csi-hostpathplugin-gntl4\" (UID: \"c612cdf9-575c-4d80-9a48-93e32d5673ae\") " pod="hostpath-provisioner/csi-hostpathplugin-gntl4" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.311600 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bsp9t\" (UniqueName: \"kubernetes.io/projected/b1377377-5ec9-4ff9-8550-aa1ae603a8cf-kube-api-access-bsp9t\") pod \"ingress-canary-2kbdk\" (UID: \"b1377377-5ec9-4ff9-8550-aa1ae603a8cf\") " pod="openshift-ingress-canary/ingress-canary-2kbdk" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.312191 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-kv2r9" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.316069 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-86kqx" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.345013 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q8whm\" (UniqueName: \"kubernetes.io/projected/e491ff79-3c18-4cf1-9e8b-3216fc5b2e7e-kube-api-access-q8whm\") pod \"machine-config-operator-74547568cd-j4xlb\" (UID: \"e491ff79-3c18-4cf1-9e8b-3216fc5b2e7e\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-j4xlb" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.345755 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.345894 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tfk4v\" (UniqueName: \"kubernetes.io/projected/471bd5c8-e8c1-4078-8e32-c0c30a1b237b-kube-api-access-tfk4v\") pod \"etcd-operator-b45778765-lpjkw\" (UID: \"471bd5c8-e8c1-4078-8e32-c0c30a1b237b\") " pod="openshift-etcd-operator/etcd-operator-b45778765-lpjkw" Jan 29 12:08:18 crc kubenswrapper[4753]: E0129 12:08:18.345993 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:18.845961185 +0000 UTC m=+113.098042630 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.346435 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:18 crc kubenswrapper[4753]: E0129 12:08:18.346878 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:18.846863352 +0000 UTC m=+113.098944807 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.350724 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-zm8vw" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.360969 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-79sb7" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.361852 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rx9j4\" (UniqueName: \"kubernetes.io/projected/d8fa6dd3-8d5a-4518-a95a-d1782279340d-kube-api-access-rx9j4\") pod \"multus-admission-controller-857f4d67dd-gln66\" (UID: \"d8fa6dd3-8d5a-4518-a95a-d1782279340d\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-gln66" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.368637 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.373565 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-gln66" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.373955 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-p7d5d" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.378514 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-qkqfh" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.385459 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f5dcg\" (UniqueName: \"kubernetes.io/projected/d379e39e-ec0e-4419-8ad9-3deae28a5729-kube-api-access-f5dcg\") pod \"machine-config-server-26486\" (UID: \"d379e39e-ec0e-4419-8ad9-3deae28a5729\") " pod="openshift-machine-config-operator/machine-config-server-26486" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.406950 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4cwgg\" (UniqueName: \"kubernetes.io/projected/4f48974c-8787-482a-b962-f7646e12952e-kube-api-access-4cwgg\") pod \"collect-profiles-29494800-86kw2\" (UID: \"4f48974c-8787-482a-b962-f7646e12952e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494800-86kw2" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.414954 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2kbdk" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.418371 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-spxzk\" (UniqueName: \"kubernetes.io/projected/e63ba05b-c0ce-43bc-b660-f5d93a3c45a5-kube-api-access-spxzk\") pod \"dns-default-jrblr\" (UID: \"e63ba05b-c0ce-43bc-b660-f5d93a3c45a5\") " pod="openshift-dns/dns-default-jrblr" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.421613 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-znsp6" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.426497 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-jrblr" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.437594 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/96da6032-b04a-4eef-8170-a57f2f9b00ba-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-nbdct\" (UID: \"96da6032-b04a-4eef-8170-a57f2f9b00ba\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-nbdct" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.504898 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-b2rb9" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.504931 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-rwlht" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.510261 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-mql86" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.510901 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-gntl4" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.512161 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:18 crc kubenswrapper[4753]: E0129 12:08:18.536072 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:19.036038714 +0000 UTC m=+113.288120179 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.538518 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-lpjkw" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.538853 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zpwp6\" (UniqueName: \"kubernetes.io/projected/8e54e1ee-a1b8-454c-8b09-e252ca3fe9da-kube-api-access-zpwp6\") pod \"router-default-5444994796-jpxd9\" (UID: \"8e54e1ee-a1b8-454c-8b09-e252ca3fe9da\") " pod="openshift-ingress/router-default-5444994796-jpxd9" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.540911 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b6e8630c-f357-4c2e-88d0-45218ea79981-bound-sa-token\") pod \"ingress-operator-5b745b69d9-4bpqt\" (UID: \"b6e8630c-f357-4c2e-88d0-45218ea79981\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4bpqt" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.541315 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-nbdct" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.542476 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kpq58\" (UniqueName: \"kubernetes.io/projected/8fd9a883-0e5e-424f-bc31-199f2103c548-kube-api-access-kpq58\") pod \"control-plane-machine-set-operator-78cbb6b69f-8dvz9\" (UID: \"8fd9a883-0e5e-424f-bc31-199f2103c548\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-8dvz9" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.544569 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sxd8k\" (UniqueName: \"kubernetes.io/projected/b41a4b6f-931f-41d8-86b4-a9f1193eb48a-kube-api-access-sxd8k\") pod \"olm-operator-6b444d44fb-v88cc\" (UID: \"b41a4b6f-931f-41d8-86b4-a9f1193eb48a\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v88cc" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.549544 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wtgvz\" (UniqueName: \"kubernetes.io/projected/279ec67d-fb6e-49a2-8339-854132f4e3ab-kube-api-access-wtgvz\") pod \"kube-storage-version-migrator-operator-b67b599dd-fp6bq\" (UID: \"279ec67d-fb6e-49a2-8339-854132f4e3ab\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-fp6bq" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.589496 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-8dvz9" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.610764 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hndgf\" (UniqueName: \"kubernetes.io/projected/368534d6-5359-447b-afd1-2a1c16c0c5d8-kube-api-access-hndgf\") pod \"package-server-manager-789f6589d5-779px\" (UID: \"368534d6-5359-447b-afd1-2a1c16c0c5d8\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-779px" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.624383 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:18 crc kubenswrapper[4753]: E0129 12:08:18.624753 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:19.124740436 +0000 UTC m=+113.376821891 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.628816 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-j4xlb" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.631079 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bjvsq\" (UniqueName: \"kubernetes.io/projected/8fe1a105-96fa-4847-97a5-3d83d04a86ec-kube-api-access-bjvsq\") pod \"migrator-59844c95c7-gkf8g\" (UID: \"8fe1a105-96fa-4847-97a5-3d83d04a86ec\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-gkf8g" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.633358 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-779px" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.635335 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8t92k\" (UniqueName: \"kubernetes.io/projected/dad90295-65db-470c-8041-19fcf86d0439-kube-api-access-8t92k\") pod \"marketplace-operator-79b997595-lswml\" (UID: \"dad90295-65db-470c-8041-19fcf86d0439\") " pod="openshift-marketplace/marketplace-operator-79b997595-lswml" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.646032 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4ghtk\" (UniqueName: \"kubernetes.io/projected/3cea010a-6482-47b3-9c79-45432b921956-kube-api-access-4ghtk\") pod \"packageserver-d55dfcdfc-8kz2s\" (UID: \"3cea010a-6482-47b3-9c79-45432b921956\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-8kz2s" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.646481 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v88cc" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.646695 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dpx95\" (UniqueName: \"kubernetes.io/projected/7a2fedca-4a36-42ec-a6b6-63fcd2de6c07-kube-api-access-dpx95\") pod \"machine-config-controller-84d6567774-6b2dk\" (UID: \"7a2fedca-4a36-42ec-a6b6-63fcd2de6c07\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6b2dk" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.664285 4753 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146 Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.702627 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-26486" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.702855 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-lswml" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.703205 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494800-86kw2" Jan 29 12:08:18 crc kubenswrapper[4753]: I0129 12:08:18.770718 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:19 crc kubenswrapper[4753]: I0129 12:08:19.120074 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4bpqt" Jan 29 12:08:19 crc kubenswrapper[4753]: I0129 12:08:19.120980 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-jpxd9" Jan 29 12:08:19 crc kubenswrapper[4753]: I0129 12:08:19.121837 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-8kz2s" Jan 29 12:08:19 crc kubenswrapper[4753]: I0129 12:08:19.124423 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-fp6bq" Jan 29 12:08:19 crc kubenswrapper[4753]: I0129 12:08:19.126711 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6b2dk" Jan 29 12:08:19 crc kubenswrapper[4753]: I0129 12:08:19.129321 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-gkf8g" Jan 29 12:08:19 crc kubenswrapper[4753]: I0129 12:08:19.129874 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-dx8db"] Jan 29 12:08:19 crc kubenswrapper[4753]: E0129 12:08:19.132054 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:19.632005231 +0000 UTC m=+113.884086686 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:19 crc kubenswrapper[4753]: I0129 12:08:19.136380 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-g5td2"] Jan 29 12:08:19 crc kubenswrapper[4753]: I0129 12:08:19.204498 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Jan 29 12:08:19 crc kubenswrapper[4753]: I0129 12:08:19.223103 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:19 crc kubenswrapper[4753]: E0129 12:08:19.223823 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:19.723804765 +0000 UTC m=+113.975886220 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:19 crc kubenswrapper[4753]: I0129 12:08:19.326730 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:19 crc kubenswrapper[4753]: E0129 12:08:19.327437 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:19.827391187 +0000 UTC m=+114.079472662 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:19 crc kubenswrapper[4753]: I0129 12:08:19.341886 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-w5dnh" event={"ID":"887a7f51-26e3-4d7e-99ff-884f7389171a","Type":"ContainerStarted","Data":"50e4b1a785716c612c36e18fe6f1ca7850c995dd5c05be5ae82cc0dad77d378c"} Jan 29 12:08:19 crc kubenswrapper[4753]: I0129 12:08:19.347095 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ltcjq" event={"ID":"ddba54c6-0a85-4055-bc93-89e961961cd7","Type":"ContainerStarted","Data":"246074dedf29b8fc689e9bda10902eaf1da29d1801bf5b23556c351b611114e0"} Jan 29 12:08:19 crc kubenswrapper[4753]: I0129 12:08:19.385360 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-f6z5s" event={"ID":"4b74c52f-102d-45ae-a789-0c43429e8aa0","Type":"ContainerStarted","Data":"7187af2929e6007509d3931219d843f944c201a391a755b6a2e6012db7cb5206"} Jan 29 12:08:19 crc kubenswrapper[4753]: I0129 12:08:19.385437 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-f6z5s" event={"ID":"4b74c52f-102d-45ae-a789-0c43429e8aa0","Type":"ContainerStarted","Data":"91fd2f0bc852112f62b4be54dd50b108c3c78c35ca067a57d4cce579e9a343d6"} Jan 29 12:08:19 crc kubenswrapper[4753]: I0129 12:08:19.391314 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-5wmqw" event={"ID":"b3a97911-76f6-4098-91b6-a67368bcac8e","Type":"ContainerStarted","Data":"ad460f3dab5ce7783aae1fe6b15cd65ec087fa7dcd2239f14751ec620a76ade1"} Jan 29 12:08:19 crc kubenswrapper[4753]: I0129 12:08:19.401949 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-nrt2l" event={"ID":"86659644-dec3-4e1a-ba32-5b4487a2f4c8","Type":"ContainerStarted","Data":"abda8ae7a0e053d245216a7e75ba921266ca0e5110a4a498f9c76ef8c826be78"} Jan 29 12:08:19 crc kubenswrapper[4753]: I0129 12:08:19.432188 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:19 crc kubenswrapper[4753]: E0129 12:08:19.432903 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:19.932888485 +0000 UTC m=+114.184969940 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:19 crc kubenswrapper[4753]: I0129 12:08:19.528453 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-bmd6j"] Jan 29 12:08:19 crc kubenswrapper[4753]: I0129 12:08:19.537135 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:19 crc kubenswrapper[4753]: E0129 12:08:19.537426 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:20.037361744 +0000 UTC m=+114.289443199 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:19 crc kubenswrapper[4753]: I0129 12:08:19.537565 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:19 crc kubenswrapper[4753]: E0129 12:08:19.539337 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:20.039317192 +0000 UTC m=+114.291398647 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:19 crc kubenswrapper[4753]: I0129 12:08:19.640190 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:19 crc kubenswrapper[4753]: E0129 12:08:19.641269 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:20.141199043 +0000 UTC m=+114.393280498 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:19 crc kubenswrapper[4753]: I0129 12:08:19.653270 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:19 crc kubenswrapper[4753]: E0129 12:08:19.654494 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:20.154472766 +0000 UTC m=+114.406554221 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:19 crc kubenswrapper[4753]: E0129 12:08:19.675859 4753 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod86659644_dec3_4e1a_ba32_5b4487a2f4c8.slice/crio-conmon-abda8ae7a0e053d245216a7e75ba921266ca0e5110a4a498f9c76ef8c826be78.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod86659644_dec3_4e1a_ba32_5b4487a2f4c8.slice/crio-abda8ae7a0e053d245216a7e75ba921266ca0e5110a4a498f9c76ef8c826be78.scope\": RecentStats: unable to find data in memory cache]" Jan 29 12:08:19 crc kubenswrapper[4753]: I0129 12:08:19.780606 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:19 crc kubenswrapper[4753]: E0129 12:08:19.781343 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:20.281322005 +0000 UTC m=+114.533403460 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:20 crc kubenswrapper[4753]: I0129 12:08:20.056848 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:20 crc kubenswrapper[4753]: E0129 12:08:20.057554 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:20.55721846 +0000 UTC m=+114.809299915 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:20 crc kubenswrapper[4753]: I0129 12:08:20.157903 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:20 crc kubenswrapper[4753]: E0129 12:08:20.158443 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:20.658423252 +0000 UTC m=+114.910504707 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:20 crc kubenswrapper[4753]: I0129 12:08:20.265680 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:20 crc kubenswrapper[4753]: E0129 12:08:20.267392 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:20.767373032 +0000 UTC m=+115.019454487 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:20 crc kubenswrapper[4753]: I0129 12:08:20.602371 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:20 crc kubenswrapper[4753]: E0129 12:08:20.602824 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:21.102803008 +0000 UTC m=+115.354884463 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:20 crc kubenswrapper[4753]: I0129 12:08:20.716036 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:20 crc kubenswrapper[4753]: E0129 12:08:20.716743 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:21.216718946 +0000 UTC m=+115.468800401 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:20 crc kubenswrapper[4753]: I0129 12:08:20.911675 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:20 crc kubenswrapper[4753]: E0129 12:08:20.912577 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:21.412546424 +0000 UTC m=+115.664627879 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:20 crc kubenswrapper[4753]: I0129 12:08:20.923514 4753 generic.go:334] "Generic (PLEG): container finished" podID="4b74c52f-102d-45ae-a789-0c43429e8aa0" containerID="7187af2929e6007509d3931219d843f944c201a391a755b6a2e6012db7cb5206" exitCode=0 Jan 29 12:08:20 crc kubenswrapper[4753]: I0129 12:08:20.923633 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-f6z5s" event={"ID":"4b74c52f-102d-45ae-a789-0c43429e8aa0","Type":"ContainerDied","Data":"7187af2929e6007509d3931219d843f944c201a391a755b6a2e6012db7cb5206"} Jan 29 12:08:20 crc kubenswrapper[4753]: I0129 12:08:20.942405 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-bmd6j" event={"ID":"fd8ff4f5-94ee-4772-b6d8-d0d28d433891","Type":"ContainerStarted","Data":"fdcad25f847a982348cb1cb34135d165fc43a9d371507fb35299b46114edc631"} Jan 29 12:08:20 crc kubenswrapper[4753]: I0129 12:08:20.950064 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-jpxd9" event={"ID":"8e54e1ee-a1b8-454c-8b09-e252ca3fe9da","Type":"ContainerStarted","Data":"4c34f65cc494e8a27dc5718e8675e081313a6a635ce0f9714d30a07aeddc594d"} Jan 29 12:08:20 crc kubenswrapper[4753]: I0129 12:08:20.952940 4753 generic.go:334] "Generic (PLEG): container finished" podID="86659644-dec3-4e1a-ba32-5b4487a2f4c8" containerID="abda8ae7a0e053d245216a7e75ba921266ca0e5110a4a498f9c76ef8c826be78" exitCode=0 Jan 29 12:08:20 crc kubenswrapper[4753]: I0129 12:08:20.953087 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-nrt2l" 
event={"ID":"86659644-dec3-4e1a-ba32-5b4487a2f4c8","Type":"ContainerDied","Data":"abda8ae7a0e053d245216a7e75ba921266ca0e5110a4a498f9c76ef8c826be78"} Jan 29 12:08:20 crc kubenswrapper[4753]: I0129 12:08:20.977603 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dx8db" event={"ID":"6a22bc59-6a04-49b4-aa79-225e9571ee71","Type":"ContainerStarted","Data":"2e4e2110436710137fb70cf18d45e15b968ed1441affd071e5f30e0f02a3b5d4"} Jan 29 12:08:21 crc kubenswrapper[4753]: I0129 12:08:21.053419 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-255vg" event={"ID":"43f6f4f5-8e79-4142-95ee-84b051b27cf3","Type":"ContainerStarted","Data":"372248ba3b0bca867fbc70fb481786d6bea287295b6c73361a005d4f514fe4bc"} Jan 29 12:08:21 crc kubenswrapper[4753]: I0129 12:08:21.218796 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:21 crc kubenswrapper[4753]: E0129 12:08:21.219271 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:21.719254261 +0000 UTC m=+115.971335716 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:21 crc kubenswrapper[4753]: I0129 12:08:21.376614 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:21 crc kubenswrapper[4753]: E0129 12:08:21.376913 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:21.876894621 +0000 UTC m=+116.128976066 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:21 crc kubenswrapper[4753]: I0129 12:08:21.392379 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=2.392356468 podStartE2EDuration="2.392356468s" podCreationTimestamp="2026-01-29 12:08:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:08:21.391889674 +0000 UTC m=+115.643971139" watchObservedRunningTime="2026-01-29 12:08:21.392356468 +0000 UTC m=+115.644437923" Jan 29 12:08:21 crc kubenswrapper[4753]: I0129 12:08:21.449118 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-5wmqw" podStartSLOduration=84.449092055 podStartE2EDuration="1m24.449092055s" podCreationTimestamp="2026-01-29 12:06:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:08:21.422054046 +0000 UTC m=+115.674135501" watchObservedRunningTime="2026-01-29 12:08:21.449092055 +0000 UTC m=+115.701173500" Jan 29 12:08:21 crc kubenswrapper[4753]: I0129 12:08:21.450773 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-86kqx"] Jan 29 12:08:21 crc kubenswrapper[4753]: I0129 12:08:21.472105 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-s4nq8"] Jan 29 12:08:21 crc kubenswrapper[4753]: I0129 12:08:21.477892 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:21 crc kubenswrapper[4753]: E0129 12:08:21.478412 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:21.978396852 +0000 UTC m=+116.230478307 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:21 crc kubenswrapper[4753]: I0129 12:08:21.496948 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-zm8vw"] Jan 29 12:08:21 crc kubenswrapper[4753]: I0129 12:08:21.517130 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-w5dnh" podStartSLOduration=83.517105346 podStartE2EDuration="1m23.517105346s" podCreationTimestamp="2026-01-29 12:06:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:08:21.490303153 +0000 UTC m=+115.742384608" watchObservedRunningTime="2026-01-29 12:08:21.517105346 +0000 UTC m=+115.769186801" Jan 29 12:08:21 crc kubenswrapper[4753]: I0129 12:08:21.518917 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-qkqfh"] Jan 29 12:08:21 crc kubenswrapper[4753]: I0129 12:08:21.579342 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:21 crc kubenswrapper[4753]: E0129 12:08:21.580614 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:22.080587052 +0000 UTC m=+116.332668507 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:21 crc kubenswrapper[4753]: I0129 12:08:21.582155 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:21 crc kubenswrapper[4753]: E0129 12:08:21.582689 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:22.082676574 +0000 UTC m=+116.334758099 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:21 crc kubenswrapper[4753]: I0129 12:08:21.683693 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:21 crc kubenswrapper[4753]: E0129 12:08:21.683853 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:22.183829584 +0000 UTC m=+116.435911049 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:21 crc kubenswrapper[4753]: I0129 12:08:21.683917 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:21 crc kubenswrapper[4753]: E0129 12:08:21.684379 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:22.18436923 +0000 UTC m=+116.436450685 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:21 crc kubenswrapper[4753]: W0129 12:08:21.771757 4753 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2d66d4a7_6db6_438c_9d2b_cb2bb0ef846b.slice/crio-32736033969cae3335b82a78f3d7888c24ba44f14d36c6186b482da22e45008b WatchSource:0}: Error finding container 32736033969cae3335b82a78f3d7888c24ba44f14d36c6186b482da22e45008b: Status 404 returned error can't find the container with id 32736033969cae3335b82a78f3d7888c24ba44f14d36c6186b482da22e45008b Jan 29 12:08:21 crc kubenswrapper[4753]: I0129 12:08:21.778183 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-kv2r9"] Jan 29 12:08:21 crc kubenswrapper[4753]: I0129 12:08:21.784417 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:21 crc kubenswrapper[4753]: E0129 12:08:21.784747 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:22.284726257 +0000 UTC m=+116.536807712 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:21 crc kubenswrapper[4753]: I0129 12:08:21.789569 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-6xpng"] Jan 29 12:08:21 crc kubenswrapper[4753]: I0129 12:08:21.886414 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:21 crc kubenswrapper[4753]: E0129 12:08:21.887139 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:22.387116374 +0000 UTC m=+116.639197829 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:21 crc kubenswrapper[4753]: I0129 12:08:21.987403 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:21 crc kubenswrapper[4753]: E0129 12:08:21.987771 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:22.487754358 +0000 UTC m=+116.739835813 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:22 crc kubenswrapper[4753]: I0129 12:08:22.149698 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:22 crc kubenswrapper[4753]: E0129 12:08:22.150089 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:22.650075847 +0000 UTC m=+116.902157302 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:22 crc kubenswrapper[4753]: I0129 12:08:22.252093 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:22 crc kubenswrapper[4753]: E0129 12:08:22.253812 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:22.753781342 +0000 UTC m=+117.005862797 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:22 crc kubenswrapper[4753]: I0129 12:08:22.361420 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:22 crc kubenswrapper[4753]: E0129 12:08:22.361982 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:22.86196069 +0000 UTC m=+117.114042145 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:22 crc kubenswrapper[4753]: I0129 12:08:22.423985 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-s4nq8" event={"ID":"2d66d4a7-6db6-438c-9d2b-cb2bb0ef846b","Type":"ContainerStarted","Data":"32736033969cae3335b82a78f3d7888c24ba44f14d36c6186b482da22e45008b"}
Jan 29 12:08:22 crc kubenswrapper[4753]: I0129 12:08:22.442780 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-g5td2" event={"ID":"3519d984-8bed-45ca-9e6d-687bec69ee24","Type":"ContainerStarted","Data":"e5fab3714ef9a58ae4dad9e60689a4282297c4a279b4e5f6a492ab41dc2f278c"}
Jan 29 12:08:22 crc kubenswrapper[4753]: I0129 12:08:22.462826 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 12:08:22 crc kubenswrapper[4753]: E0129 12:08:22.463760 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:22.963739709 +0000 UTC m=+117.215821164 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:22 crc kubenswrapper[4753]: I0129 12:08:22.492438 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ltcjq" podStartSLOduration=85.492419637 podStartE2EDuration="1m25.492419637s" podCreationTimestamp="2026-01-29 12:06:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:08:22.448019684 +0000 UTC m=+116.700101139" watchObservedRunningTime="2026-01-29 12:08:22.492419637 +0000 UTC m=+116.744501092"
Jan 29 12:08:22 crc kubenswrapper[4753]: I0129 12:08:22.518596 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-q77m6" podStartSLOduration=84.51857124 podStartE2EDuration="1m24.51857124s" podCreationTimestamp="2026-01-29 12:06:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:08:22.495647082 +0000 UTC m=+116.747728547" watchObservedRunningTime="2026-01-29 12:08:22.51857124 +0000 UTC m=+116.770652695"
Jan 29 12:08:22 crc kubenswrapper[4753]: I0129 12:08:22.522449 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-26486" event={"ID":"d379e39e-ec0e-4419-8ad9-3deae28a5729","Type":"ContainerStarted","Data":"ced0a290c7ceb83ebd79c745181137076a6183bcb96692e6362051c9c37c2153"}
Jan 29 12:08:22 crc kubenswrapper[4753]: I0129 12:08:22.550035 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dx8db"
Jan 29 12:08:22 crc kubenswrapper[4753]: I0129 12:08:22.555208 4753 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-dx8db container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" start-of-body=
Jan 29 12:08:22 crc kubenswrapper[4753]: I0129 12:08:22.555335 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dx8db" podUID="6a22bc59-6a04-49b4-aa79-225e9571ee71" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused"
Jan 29 12:08:22 crc kubenswrapper[4753]: I0129 12:08:22.564261 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:22 crc kubenswrapper[4753]: E0129 12:08:22.564776 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:23.064759365 +0000 UTC m=+117.316840820 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:22 crc kubenswrapper[4753]: I0129 12:08:22.685707 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 12:08:22 crc kubenswrapper[4753]: E0129 12:08:22.702353 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:23.20226975 +0000 UTC m=+117.454351215 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:22 crc kubenswrapper[4753]: I0129 12:08:22.795260 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:22 crc kubenswrapper[4753]: E0129 12:08:22.795836 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:23.295813605 +0000 UTC m=+117.547895130 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:23 crc kubenswrapper[4753]: I0129 12:08:22.905072 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 12:08:23 crc kubenswrapper[4753]: E0129 12:08:22.905750 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:23.405716724 +0000 UTC m=+117.657798179 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:23 crc kubenswrapper[4753]: I0129 12:08:23.006678 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:23 crc kubenswrapper[4753]: E0129 12:08:23.014454 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:23.514423357 +0000 UTC m=+117.766504812 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:23 crc kubenswrapper[4753]: I0129 12:08:23.110824 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 12:08:23 crc kubenswrapper[4753]: E0129 12:08:23.111143 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:23.611125156 +0000 UTC m=+117.863206611 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:23 crc kubenswrapper[4753]: I0129 12:08:23.191404 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dx8db" podStartSLOduration=84.191365288 podStartE2EDuration="1m24.191365288s" podCreationTimestamp="2026-01-29 12:06:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:08:23.177857198 +0000 UTC m=+117.429938653" watchObservedRunningTime="2026-01-29 12:08:23.191365288 +0000 UTC m=+117.443446743"
Jan 29 12:08:23 crc kubenswrapper[4753]: I0129 12:08:23.223486 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:23 crc kubenswrapper[4753]: E0129 12:08:23.223957 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:23.723938641 +0000 UTC m=+117.976020096 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:23 crc kubenswrapper[4753]: I0129 12:08:23.332147 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 12:08:23 crc kubenswrapper[4753]: E0129 12:08:23.332850 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:23.83283275 +0000 UTC m=+118.084914205 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:23 crc kubenswrapper[4753]: I0129 12:08:23.669622 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:23 crc kubenswrapper[4753]: E0129 12:08:23.670406 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:24.170388517 +0000 UTC m=+118.422469972 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:23 crc kubenswrapper[4753]: I0129 12:08:23.804131 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 12:08:23 crc kubenswrapper[4753]: E0129 12:08:23.804693 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:24.304650756 +0000 UTC m=+118.556732211 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:23 crc kubenswrapper[4753]: I0129 12:08:23.819523 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-kv2r9" event={"ID":"330eaab5-20d9-4b5c-a8f2-8bed8d2df7b7","Type":"ContainerStarted","Data":"ef293ade999bf56c8bc60f7a0c44766284c6c3d8eb2beca68fc26c6c32b17817"}
Jan 29 12:08:23 crc kubenswrapper[4753]: I0129 12:08:23.820568 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-qkqfh" event={"ID":"e0c82664-4c0e-4f03-858f-dcf68539f92a","Type":"ContainerStarted","Data":"e5f83dc60c1ce4d07e4e84fbb116b1bffe1ec591f87b47a52f2c69ba0b9bb991"}
Jan 29 12:08:23 crc kubenswrapper[4753]: I0129 12:08:23.821383 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-86kqx" event={"ID":"3628b07e-d247-4487-8095-821bf56656b8","Type":"ContainerStarted","Data":"b57d2cb29c67098417afae87af728ea55281aabfc05e533dc00bceea07872650"}
Jan 29 12:08:23 crc kubenswrapper[4753]: I0129 12:08:23.822218 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-6xpng" event={"ID":"d2148dab-1475-49ad-b154-99cd20f15125","Type":"ContainerStarted","Data":"ac1f1fad61087fc73ed4859e0fb9f4332e21a13332492807f59da917aeae96cb"}
Jan 29 12:08:23 crc kubenswrapper[4753]: I0129 12:08:23.823855 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dx8db" event={"ID":"6a22bc59-6a04-49b4-aa79-225e9571ee71","Type":"ContainerStarted","Data":"c65e0e7b24bf4c9f886988311d2280381b0facfcaf1b00f7415658001768b087"}
Jan 29 12:08:23 crc kubenswrapper[4753]: I0129 12:08:23.825207 4753 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-dx8db container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" start-of-body=
Jan 29 12:08:23 crc kubenswrapper[4753]: I0129 12:08:23.825305 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dx8db" podUID="6a22bc59-6a04-49b4-aa79-225e9571ee71" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused"
Jan 29 12:08:23 crc kubenswrapper[4753]: I0129 12:08:23.828039 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-zm8vw" event={"ID":"6fa51f84-9cf2-491a-bc13-0b2c4b48c5c0","Type":"ContainerStarted","Data":"15b466a1511d99cd076a0f74f0904ff621ee0dd407e0753de3fb64e0b2d027ad"}
Jan 29 12:08:23 crc kubenswrapper[4753]: I0129 12:08:23.840828 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-jpxd9" podStartSLOduration=85.840804575 podStartE2EDuration="1m25.840804575s" podCreationTimestamp="2026-01-29 12:06:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:08:23.839928479 +0000 UTC m=+118.092009934" watchObservedRunningTime="2026-01-29 12:08:23.840804575 +0000 UTC m=+118.092886050"
Jan 29 12:08:24 crc kubenswrapper[4753]: I0129 12:08:24.100574 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:24 crc kubenswrapper[4753]: E0129 12:08:24.102708 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:24.602693506 +0000 UTC m=+118.854774961 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:24 crc kubenswrapper[4753]: I0129 12:08:24.143914 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-jpxd9"
Jan 29 12:08:24 crc kubenswrapper[4753]: I0129 12:08:24.202846 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 12:08:24 crc kubenswrapper[4753]: E0129 12:08:24.204046 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:24.704001781 +0000 UTC m=+118.956083346 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:24 crc kubenswrapper[4753]: I0129 12:08:24.308030 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:24 crc kubenswrapper[4753]: E0129 12:08:24.308571 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:24.808555132 +0000 UTC m=+119.060636587 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:24 crc kubenswrapper[4753]: I0129 12:08:24.413705 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 12:08:24 crc kubenswrapper[4753]: E0129 12:08:24.414096 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:24.914070301 +0000 UTC m=+119.166151756 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:24 crc kubenswrapper[4753]: I0129 12:08:24.515692 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:24 crc kubenswrapper[4753]: E0129 12:08:24.516162 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:25.016143029 +0000 UTC m=+119.268224484 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:24 crc kubenswrapper[4753]: I0129 12:08:24.594239 4753 patch_prober.go:28] interesting pod/router-default-5444994796-jpxd9 container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body=
Jan 29 12:08:24 crc kubenswrapper[4753]: I0129 12:08:24.594485 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-jpxd9" podUID="8e54e1ee-a1b8-454c-8b09-e252ca3fe9da" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused"
Jan 29 12:08:24 crc kubenswrapper[4753]: E0129 12:08:24.649445 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:25.149409748 +0000 UTC m=+119.401491203 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:24 crc kubenswrapper[4753]: I0129 12:08:24.649285 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 12:08:24 crc kubenswrapper[4753]: I0129 12:08:24.650115 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:24 crc kubenswrapper[4753]: E0129 12:08:24.650667 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:25.150648745 +0000 UTC m=+119.402730200 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:24 crc kubenswrapper[4753]: I0129 12:08:24.751572 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 12:08:24 crc kubenswrapper[4753]: E0129 12:08:24.752115 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:25.252078339 +0000 UTC m=+119.504159794 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:24 crc kubenswrapper[4753]: I0129 12:08:24.843934 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-26486" event={"ID":"d379e39e-ec0e-4419-8ad9-3deae28a5729","Type":"ContainerStarted","Data":"cf075c928d1f462e5386c055c907ec113917a55ef48d644fe4451719379269a3"}
Jan 29 12:08:24 crc kubenswrapper[4753]: I0129 12:08:24.856854 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:24 crc kubenswrapper[4753]: E0129 12:08:24.857515 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:25.357497486 +0000 UTC m=+119.609578941 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:24 crc kubenswrapper[4753]: I0129 12:08:24.858002 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-jpxd9" event={"ID":"8e54e1ee-a1b8-454c-8b09-e252ca3fe9da","Type":"ContainerStarted","Data":"30efecfb5eb0b19d12536ebb0c79596f1cd704b056c9e4c09cc2f90c2909f107"}
Jan 29 12:08:24 crc kubenswrapper[4753]: I0129 12:08:24.903367 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-nrt2l" event={"ID":"86659644-dec3-4e1a-ba32-5b4487a2f4c8","Type":"ContainerStarted","Data":"b82e8af3764b734b319c447bb71482a1a48f8b88ff97e3e1b4d9a0ff5f956a63"}
Jan 29 12:08:24 crc kubenswrapper[4753]: I0129 12:08:24.904198 4753 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-dx8db container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" start-of-body=
Jan 29 12:08:24 crc kubenswrapper[4753]: I0129 12:08:24.904296 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dx8db" podUID="6a22bc59-6a04-49b4-aa79-225e9571ee71" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused"
Jan 29 12:08:25 crc kubenswrapper[4753]: I0129 12:08:25.063219 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 12:08:25 crc kubenswrapper[4753]: E0129 12:08:25.063510 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:25.563481675 +0000 UTC m=+119.815563140 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:25 crc kubenswrapper[4753]: I0129 12:08:25.063832 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:25 crc kubenswrapper[4753]: E0129 12:08:25.064345 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:25.56431963 +0000 UTC m=+119.816401085 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:25 crc kubenswrapper[4753]: I0129 12:08:25.274076 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 12:08:25 crc kubenswrapper[4753]: E0129 12:08:25.275537 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:25.775512744 +0000 UTC m=+120.027594199 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:25 crc kubenswrapper[4753]: I0129 12:08:25.460687 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:25 crc kubenswrapper[4753]: E0129 12:08:25.461185 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:25.961170242 +0000 UTC m=+120.213251697 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:25 crc kubenswrapper[4753]: I0129 12:08:25.604661 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 12:08:25 crc kubenswrapper[4753]: E0129 12:08:25.605183 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:26.105157189 +0000 UTC m=+120.357238644 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:25 crc kubenswrapper[4753]: I0129 12:08:25.622532 4753 csr.go:261] certificate signing request csr-cwzql is approved, waiting to be issued
Jan 29 12:08:25 crc kubenswrapper[4753]: I0129 12:08:25.636792 4753 csr.go:257] certificate signing request csr-cwzql is issued
Jan 29 12:08:25 crc kubenswrapper[4753]: I0129 12:08:25.706177 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:25 crc kubenswrapper[4753]: E0129 12:08:25.707028 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:26.206992009 +0000 UTC m=+120.459073464 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:25 crc kubenswrapper[4753]: I0129 12:08:25.807938 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 12:08:25 crc kubenswrapper[4753]: E0129 12:08:25.808156 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:26.308118909 +0000 UTC m=+120.560200374 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:25 crc kubenswrapper[4753]: I0129 12:08:25.809095 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:25 crc kubenswrapper[4753]: E0129 12:08:25.809581 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:26.309557762 +0000 UTC m=+120.561639417 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:25 crc kubenswrapper[4753]: I0129 12:08:25.810265 4753 patch_prober.go:28] interesting pod/router-default-5444994796-jpxd9 container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body=
Jan 29 12:08:25 crc kubenswrapper[4753]: I0129 12:08:25.810313 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-jpxd9" podUID="8e54e1ee-a1b8-454c-8b09-e252ca3fe9da" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused"
Jan 29 12:08:25 crc kubenswrapper[4753]: I0129 12:08:25.920335 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 12:08:25 crc kubenswrapper[4753]: E0129 12:08:25.921161 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:26.42113258 +0000 UTC m=+120.673214035 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:26 crc kubenswrapper[4753]: I0129 12:08:26.025721 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:26 crc kubenswrapper[4753]: E0129 12:08:26.026955 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:26.526905437 +0000 UTC m=+120.778986892 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:26 crc kubenswrapper[4753]: I0129 12:08:26.240446 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 12:08:26 crc kubenswrapper[4753]: E0129 12:08:26.241031 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:26.741007867 +0000 UTC m=+120.993089322 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:26 crc kubenswrapper[4753]: I0129 12:08:26.254321 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-s4nq8" event={"ID":"2d66d4a7-6db6-438c-9d2b-cb2bb0ef846b","Type":"ContainerStarted","Data":"25052aaee17e65299add9ab94060855064c779c647061f27832330d54c183d8e"}
Jan 29 12:08:26 crc kubenswrapper[4753]: I0129 12:08:26.255642 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-g5td2" event={"ID":"3519d984-8bed-45ca-9e6d-687bec69ee24","Type":"ContainerStarted","Data":"d3a67680d34c36c1cd4bd86ef98d1dc275ee91bd903efe2c3979f0dbf765698d"}
Jan 29 12:08:26 crc kubenswrapper[4753]: I0129 12:08:26.256711 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-255vg" event={"ID":"43f6f4f5-8e79-4142-95ee-84b051b27cf3","Type":"ContainerStarted","Data":"fd0e93d79077d3d6c5b7aee0174bcf72c9185177d145898ee80877492b2dc5d0"}
Jan 29 12:08:26 crc kubenswrapper[4753]: I0129 12:08:26.266420 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-86kqx" event={"ID":"3628b07e-d247-4487-8095-821bf56656b8","Type":"ContainerStarted","Data":"2268ec3aed83352853aba4c1e7db0f6a129e057add397b1db4b3b469771803ee"}
Jan 29 12:08:26 crc kubenswrapper[4753]: I0129 12:08:26.269055 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-86kqx"
Jan 29 12:08:26 crc kubenswrapper[4753]: I0129 12:08:26.282423 4753 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-86kqx container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.24:8443/healthz\": dial tcp 10.217.0.24:8443: connect: connection refused" start-of-body=
Jan 29 12:08:26 crc kubenswrapper[4753]: I0129 12:08:26.282531 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-86kqx" podUID="3628b07e-d247-4487-8095-821bf56656b8" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.24:8443/healthz\": dial tcp 10.217.0.24:8443: connect: connection refused"
Jan 29 12:08:26 crc kubenswrapper[4753]: I0129 12:08:26.285903 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-zm8vw" event={"ID":"6fa51f84-9cf2-491a-bc13-0b2c4b48c5c0","Type":"ContainerStarted","Data":"3e6a7a63abfeb3f9ec4d81a1e56c080cb950c27dedfe20ad33a23fd10bc89ea0"}
Jan 29 12:08:26 crc kubenswrapper[4753]: I0129 12:08:26.312248 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-f6z5s" event={"ID":"4b74c52f-102d-45ae-a789-0c43429e8aa0","Type":"ContainerStarted","Data":"f26b9fc58f6c38a0f2554fee19546d92d7c32f4294b486352ef916a427a0b051"}
Jan 29 12:08:26 crc kubenswrapper[4753]: I0129 12:08:26.313043 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-f6z5s"
Jan 29 12:08:26 crc kubenswrapper[4753]: I0129 12:08:26.325589 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-26486" podStartSLOduration=11.325556436 podStartE2EDuration="11.325556436s" podCreationTimestamp="2026-01-29 12:08:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:08:24.914273074 +0000 UTC m=+119.166354529" watchObservedRunningTime="2026-01-29 12:08:26.325556436 +0000 UTC m=+120.577637891"
Jan 29 12:08:26 crc kubenswrapper[4753]: I0129 12:08:26.330365 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-bmd6j" event={"ID":"fd8ff4f5-94ee-4772-b6d8-d0d28d433891","Type":"ContainerStarted","Data":"3375b4b8490835c8423618a96c0de8810a2f76dc2aefced2e556b1f20e49e0bf"}
Jan 29 12:08:26 crc kubenswrapper[4753]: I0129 12:08:26.502504 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-255vg" podStartSLOduration=88.502451016 podStartE2EDuration="1m28.502451016s" podCreationTimestamp="2026-01-29 12:06:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:08:26.324925127 +0000 UTC m=+120.577006592" watchObservedRunningTime="2026-01-29 12:08:26.502451016 +0000 UTC m=+120.754532481"
Jan 29 12:08:26 crc kubenswrapper[4753]: I0129 12:08:26.504193 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:26 crc kubenswrapper[4753]: E0129 12:08:26.507826 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:27.007799994 +0000 UTC m=+121.259881639 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:26 crc kubenswrapper[4753]: I0129 12:08:26.606069 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 12:08:26 crc kubenswrapper[4753]: E0129 12:08:26.606298 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:27.106242844 +0000 UTC m=+121.358324299 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:26 crc kubenswrapper[4753]: I0129 12:08:26.606840 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:26 crc kubenswrapper[4753]: E0129 12:08:26.608923 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:27.108905892 +0000 UTC m=+121.360987417 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:26 crc kubenswrapper[4753]: I0129 12:08:26.647881 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2027-01-29 12:03:25 +0000 UTC, rotation deadline is 2026-11-23 17:47:06.173006791 +0000 UTC
Jan 29 12:08:26 crc kubenswrapper[4753]: I0129 12:08:26.647914 4753 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 7157h38m39.525094446s for next certificate rotation
Jan 29 12:08:26 crc kubenswrapper[4753]: I0129 12:08:26.648828 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-f6z5s" podStartSLOduration=88.648804522 podStartE2EDuration="1m28.648804522s" podCreationTimestamp="2026-01-29 12:06:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:08:26.647686349 +0000 UTC m=+120.899767804" watchObservedRunningTime="2026-01-29 12:08:26.648804522 +0000 UTC m=+120.900885977"
Jan 29 12:08:26 crc kubenswrapper[4753]: I0129 12:08:26.709324 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 12:08:26 crc kubenswrapper[4753]: E0129 12:08:26.709576 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:27.209535247 +0000 UTC m=+121.461616712 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:26 crc kubenswrapper[4753]: I0129 12:08:26.713056 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:26 crc kubenswrapper[4753]: E0129 12:08:26.713606 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:27.213589647 +0000 UTC m=+121.465671102 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:26 crc kubenswrapper[4753]: I0129 12:08:26.721216 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-zm8vw" podStartSLOduration=87.721188161 podStartE2EDuration="1m27.721188161s" podCreationTimestamp="2026-01-29 12:06:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:08:26.691941627 +0000 UTC m=+120.944023082" watchObservedRunningTime="2026-01-29 12:08:26.721188161 +0000 UTC m=+120.973269616"
Jan 29 12:08:26 crc kubenswrapper[4753]: E0129 12:08:26.968513 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:27.468480582 +0000 UTC m=+121.720562037 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:26 crc kubenswrapper[4753]: I0129 12:08:26.968212 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 12:08:26 crc kubenswrapper[4753]: I0129 12:08:26.969368 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:26 crc kubenswrapper[4753]: E0129 12:08:26.969856 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:27.469842212 +0000 UTC m=+121.721923667 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:27 crc kubenswrapper[4753]: I0129 12:08:27.121476 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 12:08:27 crc kubenswrapper[4753]: E0129 12:08:27.121625 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:27.621604538 +0000 UTC m=+121.873685993 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:27 crc kubenswrapper[4753]: I0129 12:08:27.121796 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:27 crc kubenswrapper[4753]: E0129 12:08:27.122398 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:27.622386851 +0000 UTC m=+121.874468306 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:27 crc kubenswrapper[4753]: I0129 12:08:27.140551 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-86kqx" podStartSLOduration=89.140527538 podStartE2EDuration="1m29.140527538s" podCreationTimestamp="2026-01-29 12:06:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:08:26.962927718 +0000 UTC m=+121.215009163" watchObservedRunningTime="2026-01-29 12:08:27.140527538 +0000 UTC m=+121.392608993"
Jan 29 12:08:27 crc kubenswrapper[4753]: I0129 12:08:27.141581 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-bmd6j" podStartSLOduration=89.141571738 podStartE2EDuration="1m29.141571738s" podCreationTimestamp="2026-01-29 12:06:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:08:27.13992675 +0000 UTC m=+121.392008215" watchObservedRunningTime="2026-01-29 12:08:27.141571738 +0000 UTC m=+121.393653193"
Jan 29 12:08:27 crc kubenswrapper[4753]: I0129 12:08:27.264065 4753 patch_prober.go:28] interesting pod/router-default-5444994796-jpxd9 container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 29 12:08:27 crc kubenswrapper[4753]: I0129 12:08:27.264160 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-jpxd9" podUID="8e54e1ee-a1b8-454c-8b09-e252ca3fe9da" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 29 12:08:27 crc kubenswrapper[4753]: I0129 12:08:27.265146 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 12:08:27 crc kubenswrapper[4753]: E0129 12:08:27.265372 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:27.765338806 +0000 UTC m=+122.017420271 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:27 crc kubenswrapper[4753]: I0129 12:08:27.265541 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2412fa08-e643-4225-b494-eb999ea93fce-metrics-certs\") pod \"network-metrics-daemon-p6m5g\" (UID: \"2412fa08-e643-4225-b494-eb999ea93fce\") " pod="openshift-multus/network-metrics-daemon-p6m5g" Jan 29 12:08:27 crc kubenswrapper[4753]: I0129 12:08:27.265616 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:27 crc kubenswrapper[4753]: E0129 12:08:27.266038 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:27.766023186 +0000 UTC m=+122.018104641 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:27 crc kubenswrapper[4753]: I0129 12:08:27.335109 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-qkqfh" event={"ID":"e0c82664-4c0e-4f03-858f-dcf68539f92a","Type":"ContainerStarted","Data":"8ea7024d97f45bf240abe526d4a9f40f2d1f247abdc80bd1dac52b69d9e98b51"} Jan 29 12:08:27 crc kubenswrapper[4753]: I0129 12:08:27.338479 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-6xpng" event={"ID":"d2148dab-1475-49ad-b154-99cd20f15125","Type":"ContainerStarted","Data":"91f767ec918743b133c31f28c786df68b1e43a57479f69dde9b3236c9f5bb615"} Jan 29 12:08:27 crc kubenswrapper[4753]: I0129 12:08:27.341618 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-nrt2l" event={"ID":"86659644-dec3-4e1a-ba32-5b4487a2f4c8","Type":"ContainerStarted","Data":"3caad7914dc9a72cf187b258cd9f9ba510c323d0cba3c47c36cf1b3b2d5cef78"} Jan 29 12:08:27 crc kubenswrapper[4753]: I0129 12:08:27.343979 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-s4nq8" event={"ID":"2d66d4a7-6db6-438c-9d2b-cb2bb0ef846b","Type":"ContainerStarted","Data":"25d2ca4e24d70f2c53c0a6c6314a932e60361fa706f4576f63ff680560aa7806"} Jan 29 12:08:27 crc kubenswrapper[4753]: I0129 12:08:27.346152 
4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-kv2r9" event={"ID":"330eaab5-20d9-4b5c-a8f2-8bed8d2df7b7","Type":"ContainerStarted","Data":"8f7aee61062f5528424bcc611fc93dc000178ec1002db397a90d2fe3852c3c08"} Jan 29 12:08:27 crc kubenswrapper[4753]: I0129 12:08:27.348590 4753 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-86kqx container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.24:8443/healthz\": dial tcp 10.217.0.24:8443: connect: connection refused" start-of-body= Jan 29 12:08:27 crc kubenswrapper[4753]: I0129 12:08:27.348638 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-86kqx" podUID="3628b07e-d247-4487-8095-821bf56656b8" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.24:8443/healthz\": dial tcp 10.217.0.24:8443: connect: connection refused" Jan 29 12:08:27 crc kubenswrapper[4753]: I0129 12:08:27.367014 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:27 crc kubenswrapper[4753]: E0129 12:08:27.371463 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:27.871438582 +0000 UTC m=+122.123520037 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:27 crc kubenswrapper[4753]: I0129 12:08:27.379375 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2412fa08-e643-4225-b494-eb999ea93fce-metrics-certs\") pod \"network-metrics-daemon-p6m5g\" (UID: \"2412fa08-e643-4225-b494-eb999ea93fce\") " pod="openshift-multus/network-metrics-daemon-p6m5g" Jan 29 12:08:27 crc kubenswrapper[4753]: I0129 12:08:27.408408 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-qkqfh" podStartSLOduration=89.407960622 podStartE2EDuration="1m29.407960622s" podCreationTimestamp="2026-01-29 12:06:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:08:27.404071207 +0000 UTC m=+121.656152672" watchObservedRunningTime="2026-01-29 12:08:27.407960622 +0000 UTC m=+121.660042077" Jan 29 12:08:27 crc kubenswrapper[4753]: I0129 12:08:27.408478 4753 patch_prober.go:28] interesting pod/router-default-5444994796-jpxd9 container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body= Jan 29 12:08:27 crc kubenswrapper[4753]: I0129 12:08:27.408869 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-jpxd9" podUID="8e54e1ee-a1b8-454c-8b09-e252ca3fe9da" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" Jan 29 12:08:27 crc kubenswrapper[4753]: I0129 12:08:27.633832 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p6m5g" Jan 29 12:08:27 crc kubenswrapper[4753]: I0129 12:08:27.637744 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:27 crc kubenswrapper[4753]: E0129 12:08:27.639693 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:28.139679532 +0000 UTC m=+122.391760987 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:27 crc kubenswrapper[4753]: I0129 12:08:27.738467 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:27 crc kubenswrapper[4753]: E0129 12:08:27.744932 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:28.244896762 +0000 UTC m=+122.496978217 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:27 crc kubenswrapper[4753]: I0129 12:08:27.746139 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-s4nq8" podStartSLOduration=88.746116648 podStartE2EDuration="1m28.746116648s" podCreationTimestamp="2026-01-29 12:06:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:08:27.662134555 +0000 UTC m=+121.914216010" watchObservedRunningTime="2026-01-29 12:08:27.746116648 +0000 UTC m=+121.998198103" Jan 29 12:08:27 crc kubenswrapper[4753]: I0129 12:08:27.827679 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-gln66"] Jan 29 12:08:27 crc kubenswrapper[4753]: I0129 12:08:27.834812 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-kv2r9" podStartSLOduration=88.834786239 podStartE2EDuration="1m28.834786239s" podCreationTimestamp="2026-01-29 12:06:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:08:27.827877025 +0000 UTC m=+122.079958480" watchObservedRunningTime="2026-01-29 12:08:27.834786239 +0000 UTC m=+122.086867694" Jan 29 12:08:27 crc kubenswrapper[4753]: I0129 12:08:27.836522 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-79sb7"] Jan 29 12:08:27 crc kubenswrapper[4753]: I0129 12:08:27.840769 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:27 crc kubenswrapper[4753]: E0129 12:08:27.841337 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:28.341319093 +0000 UTC m=+122.593400548 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:27 crc kubenswrapper[4753]: I0129 12:08:27.954804 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:27 crc kubenswrapper[4753]: E0129 12:08:27.955604 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:28.45557621 +0000 UTC m=+122.707657665 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:28 crc kubenswrapper[4753]: I0129 12:08:27.978283 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-nrt2l" podStartSLOduration=90.97825714 podStartE2EDuration="1m30.97825714s" podCreationTimestamp="2026-01-29 12:06:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:08:27.969516622 +0000 UTC m=+122.221598077" watchObservedRunningTime="2026-01-29 12:08:27.97825714 +0000 UTC m=+122.230338585" Jan 29 12:08:28 crc kubenswrapper[4753]: I0129 12:08:28.056988 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:28 crc kubenswrapper[4753]: E0129 12:08:28.057800 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:28.557771571 +0000 UTC m=+122.809853036 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:28 crc kubenswrapper[4753]: I0129 12:08:28.190645 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:28 crc kubenswrapper[4753]: E0129 12:08:28.191076 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:28.69104948 +0000 UTC m=+122.943130935 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:28 crc kubenswrapper[4753]: I0129 12:08:28.206801 4753 patch_prober.go:28] interesting pod/router-default-5444994796-jpxd9 container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body= Jan 29 12:08:28 crc kubenswrapper[4753]: I0129 12:08:28.206875 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-jpxd9" podUID="8e54e1ee-a1b8-454c-8b09-e252ca3fe9da" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" Jan 29 12:08:28 crc kubenswrapper[4753]: I0129 12:08:28.206876 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-6xpng" podStartSLOduration=90.206852588 podStartE2EDuration="1m30.206852588s" podCreationTimestamp="2026-01-29 12:06:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:08:28.204183139 +0000 UTC m=+122.456264594" watchObservedRunningTime="2026-01-29 12:08:28.206852588 +0000 UTC m=+122.458934043" Jan 29 12:08:28 crc kubenswrapper[4753]: I0129 12:08:28.293495 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:28 crc kubenswrapper[4753]: E0129 12:08:28.294002 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:28.793986754 +0000 UTC m=+123.046068209 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:28 crc kubenswrapper[4753]: I0129 12:08:28.374430 4753 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-86kqx container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.24:8443/healthz\": dial tcp 10.217.0.24:8443: connect: connection refused" start-of-body= Jan 29 12:08:28 crc kubenswrapper[4753]: I0129 12:08:28.374486 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-86kqx" podUID="3628b07e-d247-4487-8095-821bf56656b8" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.24:8443/healthz\": dial tcp 10.217.0.24:8443: connect: connection refused" Jan 29 12:08:28 crc kubenswrapper[4753]: I0129 12:08:28.392007 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dx8db" Jan 29 12:08:28 crc kubenswrapper[4753]: I0129 12:08:28.395615 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:28 crc kubenswrapper[4753]: E0129 12:08:28.399444 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:28.89939873 +0000 UTC m=+123.151480185 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:28 crc kubenswrapper[4753]: I0129 12:08:28.485213 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-79sb7" event={"ID":"40dd9f5b-066f-4400-a4a1-6a7a9eda8c90","Type":"ContainerStarted","Data":"04c3dbac28cf109781764c1315dde63a5da0d55ba6f58dc850c779017d68626f"} Jan 29 12:08:28 crc kubenswrapper[4753]: I0129 12:08:28.508643 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:28 crc kubenswrapper[4753]: I0129 12:08:28.511060 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-g5td2" event={"ID":"3519d984-8bed-45ca-9e6d-687bec69ee24","Type":"ContainerStarted","Data":"cbf47971e8a32cf2bda18cba68da4e892e1b88b5dbf0d14e0d0538e91395e235"} Jan 29 12:08:28 crc kubenswrapper[4753]: E0129 12:08:28.511996 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:29.011979168 +0000 UTC m=+123.264060623 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:28 crc kubenswrapper[4753]: I0129 12:08:28.543379 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-gln66" event={"ID":"d8fa6dd3-8d5a-4518-a95a-d1782279340d","Type":"ContainerStarted","Data":"b879e147fcfe7b9efd9681d375290f10f4463913e28fbbd5d39ca2fc02d7b41e"} Jan 29 12:08:28 crc kubenswrapper[4753]: I0129 12:08:28.587985 4753 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-f6z5s container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.11:8443/healthz\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body= Jan 29 12:08:28 crc kubenswrapper[4753]: I0129 12:08:28.588093 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-f6z5s" podUID="4b74c52f-102d-45ae-a789-0c43429e8aa0" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.11:8443/healthz\": dial tcp 10.217.0.11:8443: connect: connection refused" Jan 29 12:08:28 crc kubenswrapper[4753]: I0129 12:08:28.610038 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:28 crc kubenswrapper[4753]: E0129 12:08:28.633167 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:29.133125319 +0000 UTC m=+123.385206774 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:28 crc kubenswrapper[4753]: I0129 12:08:28.790536 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:29 crc kubenswrapper[4753]: E0129 12:08:28.794382 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2026-01-29 12:08:29.294362715 +0000 UTC m=+123.546444250 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:29 crc kubenswrapper[4753]: I0129 12:08:29.057777 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:29 crc kubenswrapper[4753]: E0129 12:08:29.058256 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:29.558209434 +0000 UTC m=+123.810290889 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:29 crc kubenswrapper[4753]: I0129 12:08:29.091672 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-g5td2" podStartSLOduration=91.091642863 podStartE2EDuration="1m31.091642863s" podCreationTimestamp="2026-01-29 12:06:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:08:28.721364867 +0000 UTC m=+122.973446322" watchObservedRunningTime="2026-01-29 12:08:29.091642863 +0000 UTC m=+123.343724318" Jan 29 12:08:29 crc kubenswrapper[4753]: I0129 12:08:29.093489 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-znsp6"] Jan 29 12:08:29 crc kubenswrapper[4753]: I0129 12:08:29.123538 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-jpxd9" Jan 29 12:08:29 crc kubenswrapper[4753]: I0129 12:08:29.124621 4753 patch_prober.go:28] interesting pod/router-default-5444994796-jpxd9 container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body= Jan 29 12:08:29 crc kubenswrapper[4753]: I0129 12:08:29.124660 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-jpxd9" podUID="8e54e1ee-a1b8-454c-8b09-e252ca3fe9da" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" Jan 29 12:08:29 crc kubenswrapper[4753]: I0129 12:08:29.202999 
4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:29 crc kubenswrapper[4753]: E0129 12:08:29.217881 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:29.717864694 +0000 UTC m=+123.969946149 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:29 crc kubenswrapper[4753]: I0129 12:08:29.335464 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:29 crc kubenswrapper[4753]: E0129 12:08:29.336134 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:29.83611532 +0000 UTC m=+124.088196775 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:29 crc kubenswrapper[4753]: I0129 12:08:29.362472 4753 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-f6z5s container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.11:8443/healthz\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body= Jan 29 12:08:29 crc kubenswrapper[4753]: I0129 12:08:29.362560 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-f6z5s" podUID="4b74c52f-102d-45ae-a789-0c43429e8aa0" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.11:8443/healthz\": dial tcp 10.217.0.11:8443: connect: connection refused" Jan 29 12:08:29 crc kubenswrapper[4753]: I0129 12:08:29.362486 4753 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-f6z5s container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.11:8443/healthz\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body= Jan 29 12:08:29 crc kubenswrapper[4753]: I0129 12:08:29.362639 4753 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-f6z5s" podUID="4b74c52f-102d-45ae-a789-0c43429e8aa0" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.11:8443/healthz\": dial tcp 10.217.0.11:8443: connect: connection refused" Jan 29 12:08:29 crc kubenswrapper[4753]: I0129 12:08:29.376358 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-rwlht"] Jan 29 12:08:29 crc kubenswrapper[4753]: W0129 12:08:29.421871 4753 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode1a92560_535a_4f2c_9ce6_9940bfc1bf37.slice/crio-0d11f172fa54616c4be55aa5fcb66a5e7e352dfd379b56f35f61dc3d513efff3 WatchSource:0}: Error finding container 0d11f172fa54616c4be55aa5fcb66a5e7e352dfd379b56f35f61dc3d513efff3: Status 404 returned error can't find the container with id 0d11f172fa54616c4be55aa5fcb66a5e7e352dfd379b56f35f61dc3d513efff3 Jan 29 12:08:29 crc kubenswrapper[4753]: I0129 12:08:29.437336 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:29 crc kubenswrapper[4753]: E0129 12:08:29.437812 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2026-01-29 12:08:29.937794105 +0000 UTC m=+124.189875560 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:29 crc kubenswrapper[4753]: I0129 12:08:29.437889 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-mql86"] Jan 29 12:08:29 crc kubenswrapper[4753]: I0129 12:08:29.455275 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-gntl4"] Jan 29 12:08:29 crc kubenswrapper[4753]: I0129 12:08:29.460100 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-779px"] Jan 29 12:08:29 crc kubenswrapper[4753]: I0129 12:08:29.467652 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-fp6bq"] Jan 29 12:08:29 crc kubenswrapper[4753]: I0129 12:08:29.475019 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v88cc"] Jan 29 12:08:29 crc kubenswrapper[4753]: I0129 12:08:29.493150 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-gkf8g"] Jan 29 12:08:29 crc kubenswrapper[4753]: I0129 12:08:29.538447 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:29 crc kubenswrapper[4753]: E0129 12:08:29.538947 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:30.038926885 +0000 UTC m=+124.291008340 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:29 crc kubenswrapper[4753]: I0129 12:08:29.640664 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:29 crc kubenswrapper[4753]: E0129 12:08:29.641285 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:30.1412562 +0000 UTC m=+124.393337655 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:29 crc kubenswrapper[4753]: I0129 12:08:29.653643 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-79sb7" event={"ID":"40dd9f5b-066f-4400-a4a1-6a7a9eda8c90","Type":"ContainerStarted","Data":"02f4cae0320c77e3a87fd958dcc000c05b1e27e17d09fed10a1e09ab6040d89d"} Jan 29 12:08:29 crc kubenswrapper[4753]: I0129 12:08:29.654946 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-79sb7" Jan 29 12:08:29 crc kubenswrapper[4753]: I0129 12:08:29.683343 4753 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-79sb7 container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.33:8443/healthz\": dial tcp 10.217.0.33:8443: connect: connection refused" start-of-body= Jan 29 12:08:29 crc kubenswrapper[4753]: I0129 12:08:29.683407 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-79sb7" podUID="40dd9f5b-066f-4400-a4a1-6a7a9eda8c90" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.33:8443/healthz\": dial tcp 10.217.0.33:8443: connect: connection refused" Jan 29 12:08:29 crc kubenswrapper[4753]: I0129 12:08:29.692901 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-rwlht" event={"ID":"e1a92560-535a-4f2c-9ce6-9940bfc1bf37","Type":"ContainerStarted","Data":"0d11f172fa54616c4be55aa5fcb66a5e7e352dfd379b56f35f61dc3d513efff3"} Jan 29 12:08:29 crc kubenswrapper[4753]: I0129 12:08:29.726135 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-multus/multus-admission-controller-857f4d67dd-gln66" event={"ID":"d8fa6dd3-8d5a-4518-a95a-d1782279340d","Type":"ContainerStarted","Data":"4ff09d66dfeab6e731dbac56e8df75e69e49ea3fb14c7a76d7be8d45c18328b4"} Jan 29 12:08:29 crc kubenswrapper[4753]: I0129 12:08:29.744587 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:29 crc kubenswrapper[4753]: E0129 12:08:29.746812 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:30.246778519 +0000 UTC m=+124.498859974 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:29 crc kubenswrapper[4753]: I0129 12:08:29.771167 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-znsp6" event={"ID":"f9d4b340-8ceb-41e1-8289-f48222e5d3d8","Type":"ContainerStarted","Data":"612b9fb229f5cfac82080f92b2aae3ac1598b334ea6bb17017c7bdea6a1e71c6"} Jan 29 12:08:29 crc kubenswrapper[4753]: I0129 12:08:29.771559 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-znsp6" Jan 29 12:08:29 crc kubenswrapper[4753]: I0129 12:08:29.774918 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494800-86kw2"] Jan 29 12:08:29 crc kubenswrapper[4753]: I0129 12:08:29.798480 4753 patch_prober.go:28] interesting pod/console-operator-58897d9998-znsp6 container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.14:8443/readyz\": dial tcp 10.217.0.14:8443: connect: connection refused" start-of-body= Jan 29 12:08:29 crc kubenswrapper[4753]: I0129 12:08:29.798616 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-znsp6" podUID="f9d4b340-8ceb-41e1-8289-f48222e5d3d8" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.14:8443/readyz\": dial tcp 10.217.0.14:8443: connect: connection refused" Jan 29 12:08:29 crc kubenswrapper[4753]: I0129 12:08:29.821012 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-6b2dk"] Jan 29 12:08:29 crc kubenswrapper[4753]: I0129 12:08:29.839210 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-2kbdk"] Jan 29 12:08:29 crc kubenswrapper[4753]: I0129 12:08:29.841315 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-8dvz9"] Jan 29 12:08:29 crc kubenswrapper[4753]: 
I0129 12:08:29.847406 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:29 crc kubenswrapper[4753]: E0129 12:08:29.849948 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:30.349928399 +0000 UTC m=+124.602010054 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:29 crc kubenswrapper[4753]: I0129 12:08:29.854315 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-4bpqt"] Jan 29 12:08:29 crc kubenswrapper[4753]: I0129 12:08:29.857177 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-nbdct"] Jan 29 12:08:29 crc kubenswrapper[4753]: I0129 12:08:29.869423 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-8kz2s"] Jan 29 12:08:29 crc kubenswrapper[4753]: I0129 12:08:29.887600 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-j4xlb"] Jan 29 12:08:29 crc kubenswrapper[4753]: W0129 12:08:29.926104 4753 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb1377377_5ec9_4ff9_8550_aa1ae603a8cf.slice/crio-af4163aaed73b9eb641750b9b226554e0ee3a5756c46099aa04081e51ece00f4 WatchSource:0}: Error finding container af4163aaed73b9eb641750b9b226554e0ee3a5756c46099aa04081e51ece00f4: Status 404 returned error can't find the container with id af4163aaed73b9eb641750b9b226554e0ee3a5756c46099aa04081e51ece00f4 Jan 29 12:08:29 crc kubenswrapper[4753]: I0129 12:08:29.937449 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-zdhjn"] Jan 29 12:08:29 crc kubenswrapper[4753]: I0129 12:08:29.937502 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-p7d5d"] Jan 29 12:08:29 crc kubenswrapper[4753]: I0129 12:08:29.937518 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-jrblr"] Jan 29 12:08:29 crc kubenswrapper[4753]: I0129 12:08:29.937533 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-lswml"] Jan 29 12:08:29 crc kubenswrapper[4753]: I0129 12:08:29.938010 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-79sb7" podStartSLOduration=90.937985162 podStartE2EDuration="1m30.937985162s" 
podCreationTimestamp="2026-01-29 12:06:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:08:29.726868141 +0000 UTC m=+123.978949606" watchObservedRunningTime="2026-01-29 12:08:29.937985162 +0000 UTC m=+124.190066617" Jan 29 12:08:29 crc kubenswrapper[4753]: I0129 12:08:29.938183 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-b2rb9"] Jan 29 12:08:29 crc kubenswrapper[4753]: I0129 12:08:29.952005 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:29 crc kubenswrapper[4753]: E0129 12:08:29.952385 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:30.452367697 +0000 UTC m=+124.704449152 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:29 crc kubenswrapper[4753]: I0129 12:08:29.963579 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-lpjkw"] Jan 29 12:08:29 crc kubenswrapper[4753]: I0129 12:08:29.963905 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-znsp6" podStartSLOduration=91.963881917 podStartE2EDuration="1m31.963881917s" podCreationTimestamp="2026-01-29 12:06:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:08:29.820973883 +0000 UTC m=+124.073055338" watchObservedRunningTime="2026-01-29 12:08:29.963881917 +0000 UTC m=+124.215963372" Jan 29 12:08:29 crc kubenswrapper[4753]: I0129 12:08:29.972344 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-p6m5g"] Jan 29 12:08:30 crc kubenswrapper[4753]: I0129 12:08:30.055342 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:30 crc kubenswrapper[4753]: E0129 12:08:30.055779 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:30.555766794 +0000 UTC m=+124.807848249 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:30 crc kubenswrapper[4753]: I0129 12:08:30.131239 4753 patch_prober.go:28] interesting pod/router-default-5444994796-jpxd9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 29 12:08:30 crc kubenswrapper[4753]: [-]has-synced failed: reason withheld Jan 29 12:08:30 crc kubenswrapper[4753]: [+]process-running ok Jan 29 12:08:30 crc kubenswrapper[4753]: healthz check failed Jan 29 12:08:30 crc kubenswrapper[4753]: I0129 12:08:30.131294 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-jpxd9" podUID="8e54e1ee-a1b8-454c-8b09-e252ca3fe9da" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 29 12:08:30 crc kubenswrapper[4753]: I0129 12:08:30.160866 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:30 crc kubenswrapper[4753]: E0129 12:08:30.161762 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:30.661744066 +0000 UTC m=+124.913825511 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:30 crc kubenswrapper[4753]: I0129 12:08:30.264886 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:30 crc kubenswrapper[4753]: E0129 12:08:30.265434 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:30.765410521 +0000 UTC m=+125.017491976 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:30 crc kubenswrapper[4753]: I0129 12:08:30.365651 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:30 crc kubenswrapper[4753]: E0129 12:08:30.366059 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:30.866033495 +0000 UTC m=+125.118114950 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:30 crc kubenswrapper[4753]: I0129 12:08:30.469330 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:30 crc kubenswrapper[4753]: E0129 12:08:30.469624 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:30.969603337 +0000 UTC m=+125.221684862 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:30 crc kubenswrapper[4753]: I0129 12:08:30.647634 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:30 crc kubenswrapper[4753]: E0129 12:08:30.648140 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:31.148122194 +0000 UTC m=+125.400203649 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:30 crc kubenswrapper[4753]: I0129 12:08:30.755790 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:30 crc kubenswrapper[4753]: E0129 12:08:30.756252 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:31.256214849 +0000 UTC m=+125.508296304 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:30 crc kubenswrapper[4753]: I0129 12:08:30.808865 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-lpjkw" event={"ID":"471bd5c8-e8c1-4078-8e32-c0c30a1b237b","Type":"ContainerStarted","Data":"b873928c1ce4d8185acfab2513a65c146b54814e8a415a7de8b016fed5262ff5"} Jan 29 12:08:30 crc kubenswrapper[4753]: I0129 12:08:30.813932 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-jrblr" event={"ID":"e63ba05b-c0ce-43bc-b660-f5d93a3c45a5","Type":"ContainerStarted","Data":"702faf7844535a37b7edfeaf93312504dc954cdccd536454fedbd2ecdda23934"} Jan 29 12:08:30 crc kubenswrapper[4753]: I0129 12:08:30.908337 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-j4xlb" event={"ID":"e491ff79-3c18-4cf1-9e8b-3216fc5b2e7e","Type":"ContainerStarted","Data":"f0b4d3174279878bb2a7bf1ba4649e5d6bd95d54f10a6ade65201f6ab93e8fcd"} Jan 29 12:08:30 crc kubenswrapper[4753]: I0129 12:08:30.921775 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn" event={"ID":"9728fd7e-6203-4082-9297-2d3fd9e17b74","Type":"ContainerStarted","Data":"5ac04fdbe3e212ddf84391c573f7ca6c55e9e5c8d511b680b826e710c41b0856"} Jan 29 12:08:30 crc kubenswrapper[4753]: I0129 12:08:30.926190 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:30 crc kubenswrapper[4753]: E0129 12:08:30.929900 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:31.42983383 +0000 UTC m=+125.681915285 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:30 crc kubenswrapper[4753]: I0129 12:08:30.930572 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:30 crc kubenswrapper[4753]: E0129 12:08:30.931125 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:31.431086967 +0000 UTC m=+125.683168422 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:30 crc kubenswrapper[4753]: I0129 12:08:30.988592 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-mql86" event={"ID":"91845c42-ac54-4831-b8ac-73737902b703","Type":"ContainerStarted","Data":"8970286aeb105a26786d49d894b73f20a915aa3f85565955c9e8cf831abaa2fd"} Jan 29 12:08:31 crc kubenswrapper[4753]: I0129 12:08:31.075504 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-p6m5g" event={"ID":"2412fa08-e643-4225-b494-eb999ea93fce","Type":"ContainerStarted","Data":"cd12b2f10b06c9d27e85399e89dc04d783dc0d62de46c48ea7a483138a660092"} Jan 29 12:08:31 crc kubenswrapper[4753]: I0129 12:08:31.082585 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:31 crc kubenswrapper[4753]: E0129 12:08:31.083395 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:31.583208754 +0000 UTC m=+125.835290219 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:31 crc kubenswrapper[4753]: I0129 12:08:31.144833 4753 patch_prober.go:28] interesting pod/router-default-5444994796-jpxd9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 29 12:08:31 crc kubenswrapper[4753]: [-]has-synced failed: reason withheld Jan 29 12:08:31 crc kubenswrapper[4753]: [+]process-running ok Jan 29 12:08:31 crc kubenswrapper[4753]: healthz check failed Jan 29 12:08:31 crc kubenswrapper[4753]: I0129 12:08:31.144895 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-jpxd9" podUID="8e54e1ee-a1b8-454c-8b09-e252ca3fe9da" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 29 12:08:31 crc kubenswrapper[4753]: I0129 12:08:31.146052 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-2kbdk" event={"ID":"b1377377-5ec9-4ff9-8550-aa1ae603a8cf","Type":"ContainerStarted","Data":"af4163aaed73b9eb641750b9b226554e0ee3a5756c46099aa04081e51ece00f4"} Jan 29 12:08:31 crc kubenswrapper[4753]: I0129 12:08:31.149437 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-fp6bq" event={"ID":"279ec67d-fb6e-49a2-8339-854132f4e3ab","Type":"ContainerStarted","Data":"b7b893401971d73c6e282bc4c1fef3ce5ad139a50cc7d65a9feae69ce909ebad"} Jan 29 12:08:31 crc kubenswrapper[4753]: I0129 12:08:31.179934 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-8dvz9" event={"ID":"8fd9a883-0e5e-424f-bc31-199f2103c548","Type":"ContainerStarted","Data":"44823d18388e12635a27c5730fa74bfcd2da1666f534aca2055d779605a48e68"} Jan 29 12:08:31 crc kubenswrapper[4753]: I0129 12:08:31.183746 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:31 crc kubenswrapper[4753]: E0129 12:08:31.184185 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:31.684169319 +0000 UTC m=+125.936250774 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:31 crc kubenswrapper[4753]: I0129 12:08:31.285927 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:31 crc kubenswrapper[4753]: E0129 12:08:31.286417 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:31.78638707 +0000 UTC m=+126.038468525 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:31 crc kubenswrapper[4753]: I0129 12:08:31.309812 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v88cc" event={"ID":"b41a4b6f-931f-41d8-86b4-a9f1193eb48a","Type":"ContainerStarted","Data":"5a837bada57dca3e367bfe39668854b2187253fe9e3d35a9876b11d687e8310a"} Jan 29 12:08:31 crc kubenswrapper[4753]: I0129 12:08:31.420500 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:31 crc kubenswrapper[4753]: E0129 12:08:31.420960 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:31.920942958 +0000 UTC m=+126.173024413 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:31 crc kubenswrapper[4753]: I0129 12:08:31.534832 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-nbdct" event={"ID":"96da6032-b04a-4eef-8170-a57f2f9b00ba","Type":"ContainerStarted","Data":"4685a8efd0da8c914073e570de22f366269768b2649443cebc2ff8ef96e31f28"} Jan 29 12:08:31 crc kubenswrapper[4753]: I0129 12:08:31.538251 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:31 crc kubenswrapper[4753]: E0129 12:08:31.539420 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:32.03939625 +0000 UTC m=+126.291477705 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:31 crc kubenswrapper[4753]: I0129 12:08:31.549082 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-znsp6" event={"ID":"f9d4b340-8ceb-41e1-8289-f48222e5d3d8","Type":"ContainerStarted","Data":"8d5ee689b6ed165a1e5960a2410b5ad789a6c2c9895a7f943f824a256aabffb5"} Jan 29 12:08:31 crc kubenswrapper[4753]: I0129 12:08:31.550897 4753 patch_prober.go:28] interesting pod/console-operator-58897d9998-znsp6 container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.14:8443/readyz\": dial tcp 10.217.0.14:8443: connect: connection refused" start-of-body= Jan 29 12:08:31 crc kubenswrapper[4753]: I0129 12:08:31.551003 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-znsp6" podUID="f9d4b340-8ceb-41e1-8289-f48222e5d3d8" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.14:8443/readyz\": dial tcp 10.217.0.14:8443: connect: connection refused" Jan 29 12:08:31 crc kubenswrapper[4753]: I0129 12:08:31.587418 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494800-86kw2" event={"ID":"4f48974c-8787-482a-b962-f7646e12952e","Type":"ContainerStarted","Data":"aaf1083b3586b0a7b187cdb6a824407ea02b448d1b5d8ddb4236b4b8fe56f942"} Jan 29 12:08:31 crc kubenswrapper[4753]: I0129 12:08:31.613294 4753 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-gkf8g" event={"ID":"8fe1a105-96fa-4847-97a5-3d83d04a86ec","Type":"ContainerStarted","Data":"b00a213838f75d2474d2e22ec6f6e813a0198d766501bfcf701f44692aaaa6d0"} Jan 29 12:08:31 crc kubenswrapper[4753]: I0129 12:08:31.618301 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6b2dk" event={"ID":"7a2fedca-4a36-42ec-a6b6-63fcd2de6c07","Type":"ContainerStarted","Data":"d898f180f0134c3f6c4d1a16a8f13b7da81bbcc0c1014981397fe3eca95e98bf"} Jan 29 12:08:31 crc kubenswrapper[4753]: I0129 12:08:31.620170 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-b2rb9" event={"ID":"200ffbfe-dff4-45e2-944d-34a3ad56f018","Type":"ContainerStarted","Data":"c0873dae5069f31eae22a405a220dfc600549b8679f1f3a6801c20108ec8c5b8"} Jan 29 12:08:31 crc kubenswrapper[4753]: I0129 12:08:31.622168 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-rwlht" event={"ID":"e1a92560-535a-4f2c-9ce6-9940bfc1bf37","Type":"ContainerStarted","Data":"c2d917d7f976cb7e04d1629bf61375354977c06080619ccc3f8f7a200b94b976"} Jan 29 12:08:31 crc kubenswrapper[4753]: I0129 12:08:31.625117 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-gntl4" event={"ID":"c612cdf9-575c-4d80-9a48-93e32d5673ae","Type":"ContainerStarted","Data":"bd7704be76992bf2eba34d6b26d725b89a438f2d85303695bd25f52c565e27e9"} Jan 29 12:08:31 crc kubenswrapper[4753]: I0129 12:08:31.627655 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-779px" event={"ID":"368534d6-5359-447b-afd1-2a1c16c0c5d8","Type":"ContainerStarted","Data":"301b665404fdd08ba5899deeabb66783eaf521fd97f1c716c6d756126c655717"} Jan 29 12:08:31 crc kubenswrapper[4753]: I0129 12:08:31.628841 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4bpqt" event={"ID":"b6e8630c-f357-4c2e-88d0-45218ea79981","Type":"ContainerStarted","Data":"05ea52dacebc75a80854b81d94db51dd6915223045e46efead0b8dd55312bc4e"} Jan 29 12:08:31 crc kubenswrapper[4753]: I0129 12:08:31.630609 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-p7d5d" event={"ID":"6912f3bb-d8bc-4dc1-b3f0-9e7e1e8d354a","Type":"ContainerStarted","Data":"323505b1c271b3792148d42ad3c2862e7c11c98efc5671f3a1bb14fab50ff847"} Jan 29 12:08:31 crc kubenswrapper[4753]: I0129 12:08:31.651662 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-8kz2s" event={"ID":"3cea010a-6482-47b3-9c79-45432b921956","Type":"ContainerStarted","Data":"cefb8323023a7b93839ef7ec6588f34d1e0fd0af92c5adfd6f4cc25c08b1603d"} Jan 29 12:08:31 crc kubenswrapper[4753]: I0129 12:08:31.667851 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-lswml" event={"ID":"dad90295-65db-470c-8041-19fcf86d0439","Type":"ContainerStarted","Data":"d279b84d2d4a48ef2632e77a7739bcae1f51874d8c70cae02fb31bafc0fb0f34"} Jan 29 12:08:31 crc kubenswrapper[4753]: I0129 12:08:31.668639 4753 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-79sb7 container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe 
status=failure output="Get \"https://10.217.0.33:8443/healthz\": dial tcp 10.217.0.33:8443: connect: connection refused" start-of-body= Jan 29 12:08:31 crc kubenswrapper[4753]: I0129 12:08:31.668686 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-79sb7" podUID="40dd9f5b-066f-4400-a4a1-6a7a9eda8c90" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.33:8443/healthz\": dial tcp 10.217.0.33:8443: connect: connection refused" Jan 29 12:08:31 crc kubenswrapper[4753]: I0129 12:08:31.679564 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-rwlht" podStartSLOduration=94.679538182 podStartE2EDuration="1m34.679538182s" podCreationTimestamp="2026-01-29 12:06:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:08:31.677613206 +0000 UTC m=+125.929694661" watchObservedRunningTime="2026-01-29 12:08:31.679538182 +0000 UTC m=+125.931619637" Jan 29 12:08:31 crc kubenswrapper[4753]: I0129 12:08:31.686556 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:31 crc kubenswrapper[4753]: E0129 12:08:31.688411 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:32.188392874 +0000 UTC m=+126.440474409 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:31 crc kubenswrapper[4753]: I0129 12:08:31.787967 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:31 crc kubenswrapper[4753]: E0129 12:08:31.792829 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:32.29278607 +0000 UTC m=+126.544867545 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:31 crc kubenswrapper[4753]: I0129 12:08:31.793341 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-nrt2l" Jan 29 12:08:31 crc kubenswrapper[4753]: I0129 12:08:31.794024 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-nrt2l" Jan 29 12:08:31 crc kubenswrapper[4753]: I0129 12:08:31.795792 4753 patch_prober.go:28] interesting pod/apiserver-76f77b778f-nrt2l container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="Get \"https://10.217.0.7:8443/livez\": dial tcp 10.217.0.7:8443: connect: connection refused" start-of-body= Jan 29 12:08:31 crc kubenswrapper[4753]: I0129 12:08:31.795866 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-nrt2l" podUID="86659644-dec3-4e1a-ba32-5b4487a2f4c8" containerName="openshift-apiserver" probeResult="failure" output="Get \"https://10.217.0.7:8443/livez\": dial tcp 10.217.0.7:8443: connect: connection refused" Jan 29 12:08:31 crc kubenswrapper[4753]: I0129 12:08:31.895384 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:31 crc kubenswrapper[4753]: E0129 12:08:31.895985 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:32.39596786 +0000 UTC m=+126.648049315 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:32 crc kubenswrapper[4753]: I0129 12:08:31.997020 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:32 crc kubenswrapper[4753]: E0129 12:08:31.999768 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2026-01-29 12:08:32.499723537 +0000 UTC m=+126.751804992 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:32 crc kubenswrapper[4753]: I0129 12:08:32.152502 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:32 crc kubenswrapper[4753]: E0129 12:08:32.153056 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:32.653036269 +0000 UTC m=+126.905117734 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:32 crc kubenswrapper[4753]: I0129 12:08:32.169549 4753 patch_prober.go:28] interesting pod/router-default-5444994796-jpxd9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 29 12:08:32 crc kubenswrapper[4753]: [-]has-synced failed: reason withheld Jan 29 12:08:32 crc kubenswrapper[4753]: [+]process-running ok Jan 29 12:08:32 crc kubenswrapper[4753]: healthz check failed Jan 29 12:08:32 crc kubenswrapper[4753]: I0129 12:08:32.169633 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-jpxd9" podUID="8e54e1ee-a1b8-454c-8b09-e252ca3fe9da" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 29 12:08:32 crc kubenswrapper[4753]: I0129 12:08:32.294888 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:32 crc kubenswrapper[4753]: E0129 12:08:32.295413 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:32.795396758 +0000 UTC m=+127.047478213 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:32 crc kubenswrapper[4753]: I0129 12:08:32.527469 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:32 crc kubenswrapper[4753]: E0129 12:08:32.527998 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:33.027979733 +0000 UTC m=+127.280061188 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:32 crc kubenswrapper[4753]: I0129 12:08:32.704240 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Jan 29 12:08:32 crc kubenswrapper[4753]: I0129 12:08:32.705413 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 29 12:08:32 crc kubenswrapper[4753]: I0129 12:08:32.712451 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:32 crc kubenswrapper[4753]: I0129 12:08:32.712792 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6110c9b0-6745-4468-b8c6-02277a67883c-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"6110c9b0-6745-4468-b8c6-02277a67883c\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 29 12:08:32 crc kubenswrapper[4753]: I0129 12:08:32.712957 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/6110c9b0-6745-4468-b8c6-02277a67883c-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"6110c9b0-6745-4468-b8c6-02277a67883c\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 29 12:08:32 crc kubenswrapper[4753]: I0129 12:08:32.712976 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Jan 29 12:08:32 crc kubenswrapper[4753]: E0129 12:08:32.713150 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:33.213122956 +0000 UTC m=+127.465204411 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:32 crc kubenswrapper[4753]: I0129 12:08:32.713244 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Jan 29 12:08:32 crc kubenswrapper[4753]: I0129 12:08:32.970968 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6110c9b0-6745-4468-b8c6-02277a67883c-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"6110c9b0-6745-4468-b8c6-02277a67883c\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 29 12:08:32 crc kubenswrapper[4753]: I0129 12:08:32.971081 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:32 crc kubenswrapper[4753]: I0129 12:08:32.971164 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/6110c9b0-6745-4468-b8c6-02277a67883c-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"6110c9b0-6745-4468-b8c6-02277a67883c\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 29 12:08:32 crc kubenswrapper[4753]: I0129 12:08:32.971322 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/6110c9b0-6745-4468-b8c6-02277a67883c-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"6110c9b0-6745-4468-b8c6-02277a67883c\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 29 12:08:32 crc kubenswrapper[4753]: E0129 12:08:32.972073 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:33.47205732 +0000 UTC m=+127.724138775 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:32 crc kubenswrapper[4753]: I0129 12:08:32.988143 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Jan 29 12:08:33 crc kubenswrapper[4753]: I0129 12:08:33.096185 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-8dvz9" event={"ID":"8fd9a883-0e5e-424f-bc31-199f2103c548","Type":"ContainerStarted","Data":"948be2d9b139700e557efefe2435e2812699c47ce2a949a0096ec201722e61d1"} Jan 29 12:08:33 crc kubenswrapper[4753]: I0129 12:08:33.129988 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:33 crc kubenswrapper[4753]: E0129 12:08:33.130560 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:33.630541635 +0000 UTC m=+127.882623090 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:33 crc kubenswrapper[4753]: I0129 12:08:33.133551 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-j4xlb" event={"ID":"e491ff79-3c18-4cf1-9e8b-3216fc5b2e7e","Type":"ContainerStarted","Data":"5bf4b6ee662aa3af141939c800cabdbb44401784b3d086588962482f96adbff7"} Jan 29 12:08:33 crc kubenswrapper[4753]: I0129 12:08:33.139539 4753 patch_prober.go:28] interesting pod/router-default-5444994796-jpxd9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 29 12:08:33 crc kubenswrapper[4753]: [-]has-synced failed: reason withheld Jan 29 12:08:33 crc kubenswrapper[4753]: [+]process-running ok Jan 29 12:08:33 crc kubenswrapper[4753]: healthz check failed Jan 29 12:08:33 crc kubenswrapper[4753]: I0129 12:08:33.139617 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-jpxd9" podUID="8e54e1ee-a1b8-454c-8b09-e252ca3fe9da" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 29 12:08:33 crc kubenswrapper[4753]: I0129 12:08:33.153543 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6b2dk" event={"ID":"7a2fedca-4a36-42ec-a6b6-63fcd2de6c07","Type":"ContainerStarted","Data":"4a81ce944a99917bbf14da38256d2973a3470835cd4afca9281395e84dac0979"} Jan 29 12:08:33 crc kubenswrapper[4753]: I0129 12:08:33.153721 4753 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-f6z5s container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.11:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 29 12:08:33 crc kubenswrapper[4753]: I0129 12:08:33.153776 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-f6z5s" podUID="4b74c52f-102d-45ae-a789-0c43429e8aa0" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.11:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 29 12:08:33 crc kubenswrapper[4753]: I0129 12:08:33.189922 4753 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-f6z5s container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.11:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 29 12:08:33 crc kubenswrapper[4753]: I0129 12:08:33.190205 4753 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-f6z5s" podUID="4b74c52f-102d-45ae-a789-0c43429e8aa0" containerName="openshift-config-operator" 
probeResult="failure" output="Get \"https://10.217.0.11:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 29 12:08:33 crc kubenswrapper[4753]: I0129 12:08:33.200885 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-779px" event={"ID":"368534d6-5359-447b-afd1-2a1c16c0c5d8","Type":"ContainerStarted","Data":"21ee0d69f450cd25d0db999734cea10574e6958db04f346303d495ad96af2aad"} Jan 29 12:08:33 crc kubenswrapper[4753]: I0129 12:08:33.254115 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:33 crc kubenswrapper[4753]: E0129 12:08:33.254521 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:33.75450817 +0000 UTC m=+128.006589615 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:33 crc kubenswrapper[4753]: I0129 12:08:33.270059 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6110c9b0-6745-4468-b8c6-02277a67883c-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"6110c9b0-6745-4468-b8c6-02277a67883c\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 29 12:08:33 crc kubenswrapper[4753]: I0129 12:08:33.306625 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4bpqt" event={"ID":"b6e8630c-f357-4c2e-88d0-45218ea79981","Type":"ContainerStarted","Data":"12603190b8484bc935ee4c65cee8e14c28547cfac767f3f28d1306348e8b3fdc"} Jan 29 12:08:33 crc kubenswrapper[4753]: I0129 12:08:33.362151 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:33 crc kubenswrapper[4753]: E0129 12:08:33.369513 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:33.869463138 +0000 UTC m=+128.121544593 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:33 crc kubenswrapper[4753]: I0129 12:08:33.414827 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:33 crc kubenswrapper[4753]: E0129 12:08:33.415398 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:33.915366315 +0000 UTC m=+128.167447770 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:33 crc kubenswrapper[4753]: I0129 12:08:33.420878 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz"
Jan 29 12:08:33 crc kubenswrapper[4753]: I0129 12:08:33.440284 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494800-86kw2" event={"ID":"4f48974c-8787-482a-b962-f7646e12952e","Type":"ContainerStarted","Data":"622532cb2281d0d8e36ef13ffa7a1468db1e0cd54769f22c744d688794ab718f"}
Jan 29 12:08:33 crc kubenswrapper[4753]: I0129 12:08:33.463400 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-fp6bq" event={"ID":"279ec67d-fb6e-49a2-8339-854132f4e3ab","Type":"ContainerStarted","Data":"61c4e4e067695983be9108353dec4ccce15f4a776963bf8e45c12d4c0b448458"}
Jan 29 12:08:33 crc kubenswrapper[4753]: I0129 12:08:33.468727 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v88cc" event={"ID":"b41a4b6f-931f-41d8-86b4-a9f1193eb48a","Type":"ContainerStarted","Data":"89bb9fa01002af9592e6d479a3d6ad62751e9df3b37f33a7c29b9a6d6cca7169"}
Jan 29 12:08:33 crc kubenswrapper[4753]: I0129 12:08:33.469304 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v88cc"
Jan 29 12:08:33 crc kubenswrapper[4753]: I0129 12:08:33.470830 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-gkf8g" event={"ID":"8fe1a105-96fa-4847-97a5-3d83d04a86ec","Type":"ContainerStarted","Data":"6c82983378bf24cc8c5d155c3de9e9745e727fe4d7721650e3f55f18cefef236"}
Jan 29 12:08:33 crc kubenswrapper[4753]: I0129 12:08:33.471383 4753 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-v88cc container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.35:8443/healthz\": dial tcp 10.217.0.35:8443: connect: connection refused" start-of-body=
Jan 29 12:08:33 crc kubenswrapper[4753]: I0129 12:08:33.471487 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v88cc" podUID="b41a4b6f-931f-41d8-86b4-a9f1193eb48a" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.35:8443/healthz\": dial tcp 10.217.0.35:8443: connect: connection refused"
Jan 29 12:08:33 crc kubenswrapper[4753]: I0129 12:08:33.517790 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-gln66" event={"ID":"d8fa6dd3-8d5a-4518-a95a-d1782279340d","Type":"ContainerStarted","Data":"90e60f35601db6ec1b211ff7c3081d94f9b86c686c37b8c6531f834452909c52"}
Jan 29 12:08:33 crc kubenswrapper[4753]: I0129 12:08:33.518182 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 12:08:33 crc kubenswrapper[4753]: I0129 12:08:33.519140 4753 patch_prober.go:28] interesting pod/console-operator-58897d9998-znsp6 container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.14:8443/readyz\": dial tcp 10.217.0.14:8443: connect: connection refused" start-of-body=
Jan 29 12:08:33 crc kubenswrapper[4753]: I0129 12:08:33.519203 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-znsp6" podUID="f9d4b340-8ceb-41e1-8289-f48222e5d3d8" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.14:8443/readyz\": dial tcp 10.217.0.14:8443: connect: connection refused"
Jan 29 12:08:33 crc kubenswrapper[4753]: E0129 12:08:33.522340 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:34.022296576 +0000 UTC m=+128.274378031 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:33 crc kubenswrapper[4753]: I0129 12:08:33.522834 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:33 crc kubenswrapper[4753]: E0129 12:08:33.523825 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:34.02379203 +0000 UTC m=+128.275873485 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:33 crc kubenswrapper[4753]: I0129 12:08:33.725930 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-79sb7"
Jan 29 12:08:33 crc kubenswrapper[4753]: I0129 12:08:33.733748 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 12:08:33 crc kubenswrapper[4753]: E0129 12:08:33.737298 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:34.237258 +0000 UTC m=+128.489339455 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:33 crc kubenswrapper[4753]: I0129 12:08:33.743850 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-8dvz9" podStartSLOduration=94.743821964 podStartE2EDuration="1m34.743821964s" podCreationTimestamp="2026-01-29 12:06:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:08:33.459768698 +0000 UTC m=+127.711850153" watchObservedRunningTime="2026-01-29 12:08:33.743821964 +0000 UTC m=+127.995903419"
Jan 29 12:08:33 crc kubenswrapper[4753]: I0129 12:08:33.845583 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:33 crc kubenswrapper[4753]: E0129 12:08:33.846121 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:34.346103818 +0000 UTC m=+128.598185273 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:34 crc kubenswrapper[4753]: I0129 12:08:34.009436 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 12:08:34 crc kubenswrapper[4753]: E0129 12:08:34.009920 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:34.50989911 +0000 UTC m=+128.761980575 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:34 crc kubenswrapper[4753]: I0129 12:08:34.025270 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29494800-86kw2" podStartSLOduration=97.025243503 podStartE2EDuration="1m37.025243503s" podCreationTimestamp="2026-01-29 12:06:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:08:33.7433318 +0000 UTC m=+127.995413265" watchObservedRunningTime="2026-01-29 12:08:34.025243503 +0000 UTC m=+128.277324958"
Jan 29 12:08:34 crc kubenswrapper[4753]: I0129 12:08:34.110484 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:34 crc kubenswrapper[4753]: E0129 12:08:34.111653 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:34.611635477 +0000 UTC m=+128.863716932 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:34 crc kubenswrapper[4753]: I0129 12:08:34.142899 4753 patch_prober.go:28] interesting pod/router-default-5444994796-jpxd9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 29 12:08:34 crc kubenswrapper[4753]: [-]has-synced failed: reason withheld
Jan 29 12:08:34 crc kubenswrapper[4753]: [+]process-running ok
Jan 29 12:08:34 crc kubenswrapper[4753]: healthz check failed
Jan 29 12:08:34 crc kubenswrapper[4753]: I0129 12:08:34.143347 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-jpxd9" podUID="8e54e1ee-a1b8-454c-8b09-e252ca3fe9da" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 29 12:08:34 crc kubenswrapper[4753]: I0129 12:08:34.152669 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v88cc" podStartSLOduration=95.15264821 podStartE2EDuration="1m35.15264821s" podCreationTimestamp="2026-01-29 12:06:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:08:34.144444037 +0000 UTC m=+128.396525492" watchObservedRunningTime="2026-01-29 12:08:34.15264821 +0000 UTC m=+128.404729665"
Jan 29 12:08:34 crc kubenswrapper[4753]: I0129 12:08:34.246342 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 12:08:34 crc kubenswrapper[4753]: E0129 12:08:34.246866 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:34.746833573 +0000 UTC m=+128.998915028 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:34 crc kubenswrapper[4753]: I0129 12:08:34.294343 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 29 12:08:34 crc kubenswrapper[4753]: I0129 12:08:34.486799 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:34 crc kubenswrapper[4753]: I0129 12:08:34.487816 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-gln66" podStartSLOduration=95.487793526 podStartE2EDuration="1m35.487793526s" podCreationTimestamp="2026-01-29 12:06:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:08:34.487215159 +0000 UTC m=+128.739296614" watchObservedRunningTime="2026-01-29 12:08:34.487793526 +0000 UTC m=+128.739874981"
Jan 29 12:08:34 crc kubenswrapper[4753]: E0129 12:08:34.489433 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:34.989415164 +0000 UTC m=+129.241496619 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:34 crc kubenswrapper[4753]: I0129 12:08:34.606578 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 12:08:34 crc kubenswrapper[4753]: E0129 12:08:34.607056 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:35.10703109 +0000 UTC m=+129.359112545 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:34 crc kubenswrapper[4753]: I0129 12:08:34.725005 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:34 crc kubenswrapper[4753]: E0129 12:08:34.725438 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:35.22541941 +0000 UTC m=+129.477500875 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:34 crc kubenswrapper[4753]: I0129 12:08:34.727180 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-mql86" event={"ID":"91845c42-ac54-4831-b8ac-73737902b703","Type":"ContainerStarted","Data":"fc8494f06c31d72c132f672089242706abdd6fccd57b6666d27aef5f61eb3005"}
Jan 29 12:08:34 crc kubenswrapper[4753]: I0129 12:08:34.826742 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 12:08:34 crc kubenswrapper[4753]: E0129 12:08:34.828949 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:35.328922349 +0000 UTC m=+129.581003804 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:34 crc kubenswrapper[4753]: I0129 12:08:34.839491 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-nbdct" event={"ID":"96da6032-b04a-4eef-8170-a57f2f9b00ba","Type":"ContainerStarted","Data":"126fdd5f4a57ff13762189d20d35eb62bd16cebdee80f42a479e9faf09952ee5"}
Jan 29 12:08:34 crc kubenswrapper[4753]: I0129 12:08:34.931809 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:34 crc kubenswrapper[4753]: E0129 12:08:34.932593 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:35.432573974 +0000 UTC m=+129.684655439 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:35 crc kubenswrapper[4753]: I0129 12:08:35.081582 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 12:08:35 crc kubenswrapper[4753]: E0129 12:08:35.082068 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:35.582044922 +0000 UTC m=+129.834126377 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:35 crc kubenswrapper[4753]: I0129 12:08:35.110398 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-8kz2s"
Jan 29 12:08:35 crc kubenswrapper[4753]: I0129 12:08:35.118616 4753 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-8kz2s container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.26:5443/healthz\": dial tcp 10.217.0.26:5443: connect: connection refused" start-of-body=
Jan 29 12:08:35 crc kubenswrapper[4753]: I0129 12:08:35.118676 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-8kz2s" podUID="3cea010a-6482-47b3-9c79-45432b921956" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.26:5443/healthz\": dial tcp 10.217.0.26:5443: connect: connection refused"
Jan 29 12:08:35 crc kubenswrapper[4753]: I0129 12:08:35.129797 4753 patch_prober.go:28] interesting pod/router-default-5444994796-jpxd9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 29 12:08:35 crc kubenswrapper[4753]: [-]has-synced failed: reason withheld
Jan 29 12:08:35 crc kubenswrapper[4753]: [+]process-running ok
Jan 29 12:08:35 crc kubenswrapper[4753]: healthz check failed
Jan 29 12:08:35 crc kubenswrapper[4753]: I0129 12:08:35.129864 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-jpxd9" podUID="8e54e1ee-a1b8-454c-8b09-e252ca3fe9da" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 29 12:08:35 crc kubenswrapper[4753]: I0129 12:08:35.133414 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-2kbdk" event={"ID":"b1377377-5ec9-4ff9-8550-aa1ae603a8cf","Type":"ContainerStarted","Data":"eff594596412e54d51b63707e1a1cd1512ae70da9d6fdcecf629fc52db92a0e0"}
Jan 29 12:08:35 crc kubenswrapper[4753]: I0129 12:08:35.136851 4753 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-v88cc container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.35:8443/healthz\": dial tcp 10.217.0.35:8443: connect: connection refused" start-of-body=
Jan 29 12:08:35 crc kubenswrapper[4753]: I0129 12:08:35.136924 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v88cc" podUID="b41a4b6f-931f-41d8-86b4-a9f1193eb48a" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.35:8443/healthz\": dial tcp 10.217.0.35:8443: connect: connection refused"
Jan 29 12:08:35 crc kubenswrapper[4753]: I0129 12:08:35.324950 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:35 crc kubenswrapper[4753]: E0129 12:08:35.325439 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:35.825423587 +0000 UTC m=+130.077505042 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:35 crc kubenswrapper[4753]: I0129 12:08:35.366731 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-f6z5s"
Jan 29 12:08:35 crc kubenswrapper[4753]: I0129 12:08:35.468659 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 12:08:35 crc kubenswrapper[4753]: E0129 12:08:35.469908 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:35.969890387 +0000 UTC m=+130.221971842 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:35 crc kubenswrapper[4753]: I0129 12:08:35.585310 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:35 crc kubenswrapper[4753]: E0129 12:08:35.587012 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:36.086989849 +0000 UTC m=+130.339071304 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:35 crc kubenswrapper[4753]: I0129 12:08:35.745825 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 12:08:35 crc kubenswrapper[4753]: E0129 12:08:35.747455 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:36.247428341 +0000 UTC m=+130.499509796 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:36 crc kubenswrapper[4753]: I0129 12:08:35.847219 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:36 crc kubenswrapper[4753]: E0129 12:08:35.847618 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:36.347604773 +0000 UTC m=+130.599686228 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:36 crc kubenswrapper[4753]: I0129 12:08:35.989339 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 12:08:36 crc kubenswrapper[4753]: E0129 12:08:35.990328 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:36.490283951 +0000 UTC m=+130.742365436 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:36 crc kubenswrapper[4753]: I0129 12:08:36.017524 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-fp6bq" podStartSLOduration=98.017505565 podStartE2EDuration="1m38.017505565s" podCreationTimestamp="2026-01-29 12:06:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:08:36.017493915 +0000 UTC m=+130.269575380" watchObservedRunningTime="2026-01-29 12:08:36.017505565 +0000 UTC m=+130.269587030"
Jan 29 12:08:36 crc kubenswrapper[4753]: I0129 12:08:36.101377 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:36 crc kubenswrapper[4753]: E0129 12:08:36.102119 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:36.602082695 +0000 UTC m=+130.854164160 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:36 crc kubenswrapper[4753]: I0129 12:08:36.127145 4753 patch_prober.go:28] interesting pod/router-default-5444994796-jpxd9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 29 12:08:36 crc kubenswrapper[4753]: [-]has-synced failed: reason withheld
Jan 29 12:08:36 crc kubenswrapper[4753]: [+]process-running ok
Jan 29 12:08:36 crc kubenswrapper[4753]: healthz check failed
Jan 29 12:08:36 crc kubenswrapper[4753]: I0129 12:08:36.127572 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-jpxd9" podUID="8e54e1ee-a1b8-454c-8b09-e252ca3fe9da" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 29 12:08:36 crc kubenswrapper[4753]: I0129 12:08:36.297564 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 12:08:36 crc kubenswrapper[4753]: E0129 12:08:36.316862 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:36.797927685 +0000 UTC m=+131.050009140 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:36 crc kubenswrapper[4753]: I0129 12:08:36.336693 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-gkf8g" event={"ID":"8fe1a105-96fa-4847-97a5-3d83d04a86ec","Type":"ContainerStarted","Data":"195bc63971eda8bdbb62bdce42b3cd3c1103c93e605f105f0d20b464b176725a"}
Jan 29 12:08:36 crc kubenswrapper[4753]: I0129 12:08:36.435727 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:36 crc kubenswrapper[4753]: E0129 12:08:36.436774 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:36.936751738 +0000 UTC m=+131.188833193 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:36 crc kubenswrapper[4753]: I0129 12:08:36.523777 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-8kz2s" event={"ID":"3cea010a-6482-47b3-9c79-45432b921956","Type":"ContainerStarted","Data":"80993afb8d5a58a6f685d9936e71a4d53d0abe509109da2d03a98f7e320cadd6"}
Jan 29 12:08:36 crc kubenswrapper[4753]: I0129 12:08:36.524346 4753 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-8kz2s container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.26:5443/healthz\": dial tcp 10.217.0.26:5443: connect: connection refused" start-of-body=
Jan 29 12:08:36 crc kubenswrapper[4753]: I0129 12:08:36.524398 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-8kz2s" podUID="3cea010a-6482-47b3-9c79-45432b921956" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.26:5443/healthz\": dial tcp 10.217.0.26:5443: connect: connection refused"
Jan 29 12:08:36 crc kubenswrapper[4753]: I0129 12:08:36.541163 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 12:08:36 crc kubenswrapper[4753]: E0129 12:08:36.543064 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:37.04303169 +0000 UTC m=+131.295113155 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:36 crc kubenswrapper[4753]: I0129 12:08:36.790718 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:36 crc kubenswrapper[4753]: E0129 12:08:36.792839 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:37.292818964 +0000 UTC m=+131.544900419 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:36 crc kubenswrapper[4753]: I0129 12:08:36.816960 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Jan 29 12:08:36 crc kubenswrapper[4753]: I0129 12:08:36.826502 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 29 12:08:36 crc kubenswrapper[4753]: I0129 12:08:36.827514 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-nbdct" podStartSLOduration=98.827500999 podStartE2EDuration="1m38.827500999s" podCreationTimestamp="2026-01-29 12:06:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:08:36.826720616 +0000 UTC m=+131.078802071" watchObservedRunningTime="2026-01-29 12:08:36.827500999 +0000 UTC m=+131.079582444"
Jan 29 12:08:36 crc kubenswrapper[4753]: I0129 12:08:36.839138 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt"
Jan 29 12:08:36 crc kubenswrapper[4753]: I0129 12:08:36.860278 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Jan 29 12:08:36 crc kubenswrapper[4753]: I0129 12:08:36.867051 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n"
Jan 29 12:08:36 crc kubenswrapper[4753]: I0129 12:08:36.902575 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 12:08:36 crc kubenswrapper[4753]: E0129 12:08:36.903050 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:37.403028242 +0000 UTC m=+131.655109697 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:36 crc kubenswrapper[4753]: I0129 12:08:36.943188 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-8kz2s" podStartSLOduration=97.943163038 podStartE2EDuration="1m37.943163038s" podCreationTimestamp="2026-01-29 12:06:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:08:36.942127868 +0000 UTC m=+131.194209333" watchObservedRunningTime="2026-01-29 12:08:36.943163038 +0000 UTC m=+131.195244493"
Jan 29 12:08:37 crc kubenswrapper[4753]: I0129 12:08:37.004438 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6ab526de-deef-4ec9-9523-95137afadeca-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"6ab526de-deef-4ec9-9523-95137afadeca\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 29 12:08:37 crc kubenswrapper[4753]: I0129 12:08:37.004531 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/6ab526de-deef-4ec9-9523-95137afadeca-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"6ab526de-deef-4ec9-9523-95137afadeca\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 29 12:08:37 crc kubenswrapper[4753]: I0129 12:08:37.004636 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:37 crc kubenswrapper[4753]: E0129 12:08:37.005253 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:37.505207132 +0000 UTC m=+131.757288587 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:37 crc kubenswrapper[4753]: I0129 12:08:37.174210 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 12:08:37 crc kubenswrapper[4753]: I0129 12:08:37.174659 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6ab526de-deef-4ec9-9523-95137afadeca-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"6ab526de-deef-4ec9-9523-95137afadeca\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 29 12:08:37 crc kubenswrapper[4753]: I0129 12:08:37.174712 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/6ab526de-deef-4ec9-9523-95137afadeca-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"6ab526de-deef-4ec9-9523-95137afadeca\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 29 12:08:37 crc kubenswrapper[4753]: I0129 12:08:37.174911 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/6ab526de-deef-4ec9-9523-95137afadeca-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"6ab526de-deef-4ec9-9523-95137afadeca\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 29 12:08:37 crc kubenswrapper[4753]: E0129 12:08:37.175033 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:37.675005772 +0000 UTC m=+131.927087227 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:37 crc kubenswrapper[4753]: I0129 12:08:37.277725 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:37 crc kubenswrapper[4753]: E0129 12:08:37.278085 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:37.778069929 +0000 UTC m=+132.030151384 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:37 crc kubenswrapper[4753]: I0129 12:08:37.280482 4753 patch_prober.go:28] interesting pod/router-default-5444994796-jpxd9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 29 12:08:37 crc kubenswrapper[4753]: [-]has-synced failed: reason withheld
Jan 29 12:08:37 crc kubenswrapper[4753]: [+]process-running ok
Jan 29 12:08:37 crc kubenswrapper[4753]: healthz check failed
Jan 29 12:08:37 crc kubenswrapper[4753]: I0129 12:08:37.280559 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-jpxd9" podUID="8e54e1ee-a1b8-454c-8b09-e252ca3fe9da" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 29 12:08:37 crc kubenswrapper[4753]: I0129 12:08:37.463180 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 12:08:37 crc kubenswrapper[4753]: E0129 12:08:37.463656 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:37.963636044 +0000 UTC m=+132.215717499 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:37 crc kubenswrapper[4753]: I0129 12:08:37.470172 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-mql86" podStartSLOduration=99.470140726 podStartE2EDuration="1m39.470140726s" podCreationTimestamp="2026-01-29 12:06:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:08:37.326928613 +0000 UTC m=+131.579010088" watchObservedRunningTime="2026-01-29 12:08:37.470140726 +0000 UTC m=+131.722222181"
Jan 29 12:08:37 crc kubenswrapper[4753]: I0129 12:08:37.470701 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-t89jd"]
Jan 29 12:08:37 crc kubenswrapper[4753]: I0129 12:08:37.471893 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-t89jd"
Jan 29 12:08:37 crc kubenswrapper[4753]: I0129 12:08:37.548794 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl"
Jan 29 12:08:37 crc kubenswrapper[4753]: I0129 12:08:37.553869 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6ab526de-deef-4ec9-9523-95137afadeca-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"6ab526de-deef-4ec9-9523-95137afadeca\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 29 12:08:37 crc kubenswrapper[4753]: I0129 12:08:37.569687 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:37 crc kubenswrapper[4753]: E0129 12:08:37.570176 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:38.070159583 +0000 UTC m=+132.322241138 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:37 crc kubenswrapper[4753]: I0129 12:08:37.667579 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 29 12:08:37 crc kubenswrapper[4753]: I0129 12:08:37.668568 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-2kbdk" podStartSLOduration=22.668546561 podStartE2EDuration="22.668546561s" podCreationTimestamp="2026-01-29 12:08:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:08:37.667716457 +0000 UTC m=+131.919797912" watchObservedRunningTime="2026-01-29 12:08:37.668546561 +0000 UTC m=+131.920628016"
Jan 29 12:08:37 crc kubenswrapper[4753]: I0129 12:08:37.819021 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 12:08:37 crc kubenswrapper[4753]: I0129 12:08:37.819330 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mg8l8\" (UniqueName: \"kubernetes.io/projected/ce6846a0-6c85-4ae9-afae-b10ead46d21d-kube-api-access-mg8l8\") pod \"community-operators-t89jd\" (UID: \"ce6846a0-6c85-4ae9-afae-b10ead46d21d\") " pod="openshift-marketplace/community-operators-t89jd"
Jan 29 12:08:37 crc kubenswrapper[4753]: I0129 12:08:37.819375 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ce6846a0-6c85-4ae9-afae-b10ead46d21d-utilities\") pod \"community-operators-t89jd\" (UID: \"ce6846a0-6c85-4ae9-afae-b10ead46d21d\") " pod="openshift-marketplace/community-operators-t89jd"
Jan 29 12:08:37 crc kubenswrapper[4753]: I0129 12:08:37.819514 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ce6846a0-6c85-4ae9-afae-b10ead46d21d-catalog-content\") pod \"community-operators-t89jd\" (UID: \"ce6846a0-6c85-4ae9-afae-b10ead46d21d\") " pod="openshift-marketplace/community-operators-t89jd"
Jan 29 12:08:37 crc kubenswrapper[4753]: E0129 12:08:37.819662 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:38.319641448 +0000 UTC m=+132.571722903 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:37 crc kubenswrapper[4753]: I0129 12:08:37.867244 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-t89jd"]
Jan 29 12:08:38 crc kubenswrapper[4753]: I0129 12:08:37.999077 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:38 crc kubenswrapper[4753]: I0129 12:08:37.999278 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ce6846a0-6c85-4ae9-afae-b10ead46d21d-catalog-content\") pod \"community-operators-t89jd\" (UID: \"ce6846a0-6c85-4ae9-afae-b10ead46d21d\") " pod="openshift-marketplace/community-operators-t89jd"
Jan 29 12:08:38 crc kubenswrapper[4753]: I0129 12:08:37.999437 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mg8l8\" (UniqueName: \"kubernetes.io/projected/ce6846a0-6c85-4ae9-afae-b10ead46d21d-kube-api-access-mg8l8\") pod \"community-operators-t89jd\" (UID: \"ce6846a0-6c85-4ae9-afae-b10ead46d21d\") " pod="openshift-marketplace/community-operators-t89jd"
Jan 29 12:08:38 crc kubenswrapper[4753]: I0129 12:08:37.999475 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ce6846a0-6c85-4ae9-afae-b10ead46d21d-utilities\") pod \"community-operators-t89jd\" (UID: \"ce6846a0-6c85-4ae9-afae-b10ead46d21d\") " pod="openshift-marketplace/community-operators-t89jd"
Jan 29 12:08:38 crc kubenswrapper[4753]: I0129 12:08:38.005893 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ce6846a0-6c85-4ae9-afae-b10ead46d21d-catalog-content\") pod \"community-operators-t89jd\" (UID: \"ce6846a0-6c85-4ae9-afae-b10ead46d21d\") " pod="openshift-marketplace/community-operators-t89jd"
Jan 29 12:08:38 crc kubenswrapper[4753]: I0129 12:08:38.006752 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ce6846a0-6c85-4ae9-afae-b10ead46d21d-utilities\") pod \"community-operators-t89jd\" (UID: \"ce6846a0-6c85-4ae9-afae-b10ead46d21d\") " pod="openshift-marketplace/community-operators-t89jd"
Jan 29 12:08:38 crc kubenswrapper[4753]: E0129 12:08:38.015900 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:38.515871468 +0000 UTC m=+132.767952933 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:38 crc kubenswrapper[4753]: I0129 12:08:38.043480 4753 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-lswml container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.38:8080/healthz\": dial tcp 10.217.0.38:8080: connect: connection refused" start-of-body=
Jan 29 12:08:38 crc kubenswrapper[4753]: I0129 12:08:38.043561 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-lswml" podUID="dad90295-65db-470c-8041-19fcf86d0439" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.38:8080/healthz\": dial tcp 10.217.0.38:8080: connect: connection refused"
Jan 29 12:08:38 crc kubenswrapper[4753]: I0129 12:08:38.045296 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-lswml"
Jan 29 12:08:38 crc kubenswrapper[4753]: I0129 12:08:38.045643 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-pcxth"]
Jan 29 12:08:38 crc kubenswrapper[4753]: I0129 12:08:38.046753 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-lswml" event={"ID":"dad90295-65db-470c-8041-19fcf86d0439","Type":"ContainerStarted","Data":"3edfd10e3371577561cb5294e3223259b5cdccc67802061c5edac4bd5dea3ee0"}
Jan 29 12:08:38 crc kubenswrapper[4753]: I0129 12:08:38.046794 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-bsxpd"]
Jan 29 12:08:38 crc kubenswrapper[4753]: I0129 12:08:38.107206 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-pcxth"
Jan 29 12:08:38 crc kubenswrapper[4753]: I0129 12:08:38.170185 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 12:08:38 crc kubenswrapper[4753]: E0129 12:08:38.170949 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:38.670929281 +0000 UTC m=+132.923010736 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:38 crc kubenswrapper[4753]: I0129 12:08:38.171067 4753 patch_prober.go:28] interesting pod/router-default-5444994796-jpxd9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 29 12:08:38 crc kubenswrapper[4753]: [-]has-synced failed: reason withheld
Jan 29 12:08:38 crc kubenswrapper[4753]: [+]process-running ok
Jan 29 12:08:38 crc kubenswrapper[4753]: healthz check failed
Jan 29 12:08:38 crc kubenswrapper[4753]: I0129 12:08:38.171122 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-jpxd9" podUID="8e54e1ee-a1b8-454c-8b09-e252ca3fe9da" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 29 12:08:38 crc kubenswrapper[4753]: I0129 12:08:38.236623 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Jan 29 12:08:38 crc kubenswrapper[4753]: I0129 12:08:38.276768 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-p6m5g" event={"ID":"2412fa08-e643-4225-b494-eb999ea93fce","Type":"ContainerStarted","Data":"7d237a79acc588fbada9afd741b682a56e988a3ee4eac68f421a907ddd9e12a4"}
Jan 29 12:08:38 crc kubenswrapper[4753]: I0129 12:08:38.276830 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-j4xlb" event={"ID":"e491ff79-3c18-4cf1-9e8b-3216fc5b2e7e","Type":"ContainerStarted","Data":"f5a7d52722b48cb291e9bd77256d95a6f72afbb29ff4a957ff63d32643d0c344"}
Jan 29 12:08:38 crc kubenswrapper[4753]: I0129 12:08:38.276856 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-pcxth"]
Jan 29 12:08:38 crc kubenswrapper[4753]: I0129 12:08:38.276883 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bsxpd"]
Jan 29 12:08:38 crc kubenswrapper[4753]: I0129 12:08:38.277079 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-bsxpd" Jan 29 12:08:38 crc kubenswrapper[4753]: I0129 12:08:38.279239 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/225f75d2-06ff-4a8e-ad48-5fb73aba9a5f-catalog-content\") pod \"certified-operators-pcxth\" (UID: \"225f75d2-06ff-4a8e-ad48-5fb73aba9a5f\") " pod="openshift-marketplace/certified-operators-pcxth" Jan 29 12:08:38 crc kubenswrapper[4753]: I0129 12:08:38.279307 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/225f75d2-06ff-4a8e-ad48-5fb73aba9a5f-utilities\") pod \"certified-operators-pcxth\" (UID: \"225f75d2-06ff-4a8e-ad48-5fb73aba9a5f\") " pod="openshift-marketplace/certified-operators-pcxth" Jan 29 12:08:38 crc kubenswrapper[4753]: I0129 12:08:38.279370 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zkq7z\" (UniqueName: \"kubernetes.io/projected/ae52688b-6f7a-441f-927b-ab547b7ce44f-kube-api-access-zkq7z\") pod \"community-operators-bsxpd\" (UID: \"ae52688b-6f7a-441f-927b-ab547b7ce44f\") " pod="openshift-marketplace/community-operators-bsxpd" Jan 29 12:08:38 crc kubenswrapper[4753]: I0129 12:08:38.279444 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae52688b-6f7a-441f-927b-ab547b7ce44f-utilities\") pod \"community-operators-bsxpd\" (UID: \"ae52688b-6f7a-441f-927b-ab547b7ce44f\") " pod="openshift-marketplace/community-operators-bsxpd" Jan 29 12:08:38 crc kubenswrapper[4753]: I0129 12:08:38.279526 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae52688b-6f7a-441f-927b-ab547b7ce44f-catalog-content\") pod \"community-operators-bsxpd\" (UID: \"ae52688b-6f7a-441f-927b-ab547b7ce44f\") " pod="openshift-marketplace/community-operators-bsxpd" Jan 29 12:08:38 crc kubenswrapper[4753]: I0129 12:08:38.279573 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:38 crc kubenswrapper[4753]: I0129 12:08:38.279639 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pbc79\" (UniqueName: \"kubernetes.io/projected/225f75d2-06ff-4a8e-ad48-5fb73aba9a5f-kube-api-access-pbc79\") pod \"certified-operators-pcxth\" (UID: \"225f75d2-06ff-4a8e-ad48-5fb73aba9a5f\") " pod="openshift-marketplace/certified-operators-pcxth" Jan 29 12:08:38 crc kubenswrapper[4753]: E0129 12:08:38.280957 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:38.780939083 +0000 UTC m=+133.033020538 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:38 crc kubenswrapper[4753]: I0129 12:08:38.687471 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:38 crc kubenswrapper[4753]: I0129 12:08:38.687741 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae52688b-6f7a-441f-927b-ab547b7ce44f-catalog-content\") pod \"community-operators-bsxpd\" (UID: \"ae52688b-6f7a-441f-927b-ab547b7ce44f\") " pod="openshift-marketplace/community-operators-bsxpd" Jan 29 12:08:38 crc kubenswrapper[4753]: I0129 12:08:38.687854 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pbc79\" (UniqueName: \"kubernetes.io/projected/225f75d2-06ff-4a8e-ad48-5fb73aba9a5f-kube-api-access-pbc79\") pod \"certified-operators-pcxth\" (UID: \"225f75d2-06ff-4a8e-ad48-5fb73aba9a5f\") " pod="openshift-marketplace/certified-operators-pcxth" Jan 29 12:08:38 crc kubenswrapper[4753]: I0129 12:08:38.687943 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/225f75d2-06ff-4a8e-ad48-5fb73aba9a5f-catalog-content\") pod \"certified-operators-pcxth\" (UID: \"225f75d2-06ff-4a8e-ad48-5fb73aba9a5f\") " pod="openshift-marketplace/certified-operators-pcxth" Jan 29 12:08:38 crc kubenswrapper[4753]: I0129 12:08:38.687987 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/225f75d2-06ff-4a8e-ad48-5fb73aba9a5f-utilities\") pod \"certified-operators-pcxth\" (UID: \"225f75d2-06ff-4a8e-ad48-5fb73aba9a5f\") " pod="openshift-marketplace/certified-operators-pcxth" Jan 29 12:08:38 crc kubenswrapper[4753]: I0129 12:08:38.688039 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zkq7z\" (UniqueName: \"kubernetes.io/projected/ae52688b-6f7a-441f-927b-ab547b7ce44f-kube-api-access-zkq7z\") pod \"community-operators-bsxpd\" (UID: \"ae52688b-6f7a-441f-927b-ab547b7ce44f\") " pod="openshift-marketplace/community-operators-bsxpd" Jan 29 12:08:38 crc kubenswrapper[4753]: I0129 12:08:38.688116 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae52688b-6f7a-441f-927b-ab547b7ce44f-utilities\") pod \"community-operators-bsxpd\" (UID: \"ae52688b-6f7a-441f-927b-ab547b7ce44f\") " pod="openshift-marketplace/community-operators-bsxpd" Jan 29 12:08:38 crc kubenswrapper[4753]: I0129 12:08:38.689796 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae52688b-6f7a-441f-927b-ab547b7ce44f-utilities\") pod \"community-operators-bsxpd\" (UID: \"ae52688b-6f7a-441f-927b-ab547b7ce44f\") 
" pod="openshift-marketplace/community-operators-bsxpd" Jan 29 12:08:38 crc kubenswrapper[4753]: E0129 12:08:38.690017 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:39.189979064 +0000 UTC m=+133.442060519 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:38 crc kubenswrapper[4753]: I0129 12:08:38.690583 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae52688b-6f7a-441f-927b-ab547b7ce44f-catalog-content\") pod \"community-operators-bsxpd\" (UID: \"ae52688b-6f7a-441f-927b-ab547b7ce44f\") " pod="openshift-marketplace/community-operators-bsxpd" Jan 29 12:08:38 crc kubenswrapper[4753]: I0129 12:08:38.704523 4753 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-lswml container/marketplace-operator namespace/openshift-marketplace: Liveness probe status=failure output="Get \"http://10.217.0.38:8080/healthz\": dial tcp 10.217.0.38:8080: connect: connection refused" start-of-body= Jan 29 12:08:38 crc kubenswrapper[4753]: I0129 12:08:38.718120 4753 patch_prober.go:28] interesting pod/console-operator-58897d9998-znsp6 container/console-operator namespace/openshift-console-operator: Liveness probe status=failure output="Get \"https://10.217.0.14:8443/healthz\": dial tcp 10.217.0.14:8443: connect: connection refused" start-of-body= Jan 29 12:08:38 crc kubenswrapper[4753]: I0129 12:08:38.718282 4753 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console-operator/console-operator-58897d9998-znsp6" podUID="f9d4b340-8ceb-41e1-8289-f48222e5d3d8" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.14:8443/healthz\": dial tcp 10.217.0.14:8443: connect: connection refused" Jan 29 12:08:38 crc kubenswrapper[4753]: I0129 12:08:38.718471 4753 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-lswml container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.38:8080/healthz\": dial tcp 10.217.0.38:8080: connect: connection refused" start-of-body= Jan 29 12:08:38 crc kubenswrapper[4753]: I0129 12:08:38.718500 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-lswml" podUID="dad90295-65db-470c-8041-19fcf86d0439" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.38:8080/healthz\": dial tcp 10.217.0.38:8080: connect: connection refused" Jan 29 12:08:38 crc kubenswrapper[4753]: I0129 12:08:38.721998 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/225f75d2-06ff-4a8e-ad48-5fb73aba9a5f-catalog-content\") pod \"certified-operators-pcxth\" (UID: \"225f75d2-06ff-4a8e-ad48-5fb73aba9a5f\") " pod="openshift-marketplace/certified-operators-pcxth" Jan 29 12:08:38 crc 
kubenswrapper[4753]: I0129 12:08:38.722397 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/225f75d2-06ff-4a8e-ad48-5fb73aba9a5f-utilities\") pod \"certified-operators-pcxth\" (UID: \"225f75d2-06ff-4a8e-ad48-5fb73aba9a5f\") " pod="openshift-marketplace/certified-operators-pcxth" Jan 29 12:08:38 crc kubenswrapper[4753]: I0129 12:08:38.723124 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-5clxb"] Jan 29 12:08:38 crc kubenswrapper[4753]: I0129 12:08:38.731910 4753 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-v88cc container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.35:8443/healthz\": dial tcp 10.217.0.35:8443: connect: connection refused" start-of-body= Jan 29 12:08:38 crc kubenswrapper[4753]: I0129 12:08:38.733190 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v88cc" podUID="b41a4b6f-931f-41d8-86b4-a9f1193eb48a" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.35:8443/healthz\": dial tcp 10.217.0.35:8443: connect: connection refused" Jan 29 12:08:38 crc kubenswrapper[4753]: I0129 12:08:38.734774 4753 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-v88cc container/olm-operator namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.35:8443/healthz\": dial tcp 10.217.0.35:8443: connect: connection refused" start-of-body= Jan 29 12:08:38 crc kubenswrapper[4753]: I0129 12:08:38.743310 4753 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v88cc" podUID="b41a4b6f-931f-41d8-86b4-a9f1193eb48a" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.35:8443/healthz\": dial tcp 10.217.0.35:8443: connect: connection refused" Jan 29 12:08:38 crc kubenswrapper[4753]: I0129 12:08:38.783870 4753 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/marketplace-operator-79b997595-lswml" podUID="dad90295-65db-470c-8041-19fcf86d0439" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.38:8080/healthz\": dial tcp 10.217.0.38:8080: connect: connection refused" Jan 29 12:08:38 crc kubenswrapper[4753]: I0129 12:08:38.793743 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:39 crc kubenswrapper[4753]: E0129 12:08:39.028471 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:39.5284276 +0000 UTC m=+133.780509585 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:39 crc kubenswrapper[4753]: I0129 12:08:39.044443 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:39 crc kubenswrapper[4753]: E0129 12:08:39.045404 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:39.545273658 +0000 UTC m=+133.797355123 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:39 crc kubenswrapper[4753]: I0129 12:08:39.046905 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:39 crc kubenswrapper[4753]: E0129 12:08:39.055934 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:39.555911462 +0000 UTC m=+133.807992917 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:39 crc kubenswrapper[4753]: I0129 12:08:39.058043 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-mql86" Jan 29 12:08:39 crc kubenswrapper[4753]: I0129 12:08:39.058139 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-779px" Jan 29 12:08:39 crc kubenswrapper[4753]: I0129 12:08:39.058155 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-mql86" Jan 29 12:08:39 crc kubenswrapper[4753]: I0129 12:08:39.058178 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-779px" event={"ID":"368534d6-5359-447b-afd1-2a1c16c0c5d8","Type":"ContainerStarted","Data":"f508b8dd31e82835493a1ec5c307cc5259e92891f4b956059215ca4311ff9005"} Jan 29 12:08:39 crc kubenswrapper[4753]: I0129 12:08:39.058356 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5clxb" Jan 29 12:08:39 crc kubenswrapper[4753]: I0129 12:08:39.123568 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mg8l8\" (UniqueName: \"kubernetes.io/projected/ce6846a0-6c85-4ae9-afae-b10ead46d21d-kube-api-access-mg8l8\") pod \"community-operators-t89jd\" (UID: \"ce6846a0-6c85-4ae9-afae-b10ead46d21d\") " pod="openshift-marketplace/community-operators-t89jd" Jan 29 12:08:39 crc kubenswrapper[4753]: I0129 12:08:39.124243 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pbc79\" (UniqueName: \"kubernetes.io/projected/225f75d2-06ff-4a8e-ad48-5fb73aba9a5f-kube-api-access-pbc79\") pod \"certified-operators-pcxth\" (UID: \"225f75d2-06ff-4a8e-ad48-5fb73aba9a5f\") " pod="openshift-marketplace/certified-operators-pcxth" Jan 29 12:08:39 crc kubenswrapper[4753]: I0129 12:08:39.124814 4753 patch_prober.go:28] interesting pod/console-f9d7485db-mql86 container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.15:8443/health\": dial tcp 10.217.0.15:8443: connect: connection refused" start-of-body= Jan 29 12:08:39 crc kubenswrapper[4753]: I0129 12:08:39.124940 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-mql86" podUID="91845c42-ac54-4831-b8ac-73737902b703" containerName="console" probeResult="failure" output="Get \"https://10.217.0.15:8443/health\": dial tcp 10.217.0.15:8443: connect: connection refused" Jan 29 12:08:39 crc kubenswrapper[4753]: I0129 12:08:39.126295 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zkq7z\" (UniqueName: \"kubernetes.io/projected/ae52688b-6f7a-441f-927b-ab547b7ce44f-kube-api-access-zkq7z\") pod \"community-operators-bsxpd\" (UID: \"ae52688b-6f7a-441f-927b-ab547b7ce44f\") " pod="openshift-marketplace/community-operators-bsxpd" Jan 29 12:08:39 crc kubenswrapper[4753]: I0129 12:08:39.127377 4753 
patch_prober.go:28] interesting pod/console-operator-58897d9998-znsp6 container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.14:8443/readyz\": dial tcp 10.217.0.14:8443: connect: connection refused" start-of-body= Jan 29 12:08:39 crc kubenswrapper[4753]: I0129 12:08:39.127419 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-znsp6" podUID="f9d4b340-8ceb-41e1-8289-f48222e5d3d8" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.14:8443/readyz\": dial tcp 10.217.0.14:8443: connect: connection refused" Jan 29 12:08:39 crc kubenswrapper[4753]: I0129 12:08:39.127959 4753 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-8kz2s container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.26:5443/healthz\": dial tcp 10.217.0.26:5443: connect: connection refused" start-of-body= Jan 29 12:08:39 crc kubenswrapper[4753]: I0129 12:08:39.127985 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-8kz2s" podUID="3cea010a-6482-47b3-9c79-45432b921956" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.26:5443/healthz\": dial tcp 10.217.0.26:5443: connect: connection refused" Jan 29 12:08:39 crc kubenswrapper[4753]: I0129 12:08:39.129381 4753 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-8kz2s container/packageserver namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.26:5443/healthz\": dial tcp 10.217.0.26:5443: connect: connection refused" start-of-body= Jan 29 12:08:39 crc kubenswrapper[4753]: I0129 12:08:39.129406 4753 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-8kz2s" podUID="3cea010a-6482-47b3-9c79-45432b921956" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.26:5443/healthz\": dial tcp 10.217.0.26:5443: connect: connection refused" Jan 29 12:08:39 crc kubenswrapper[4753]: I0129 12:08:39.143314 4753 patch_prober.go:28] interesting pod/router-default-5444994796-jpxd9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 29 12:08:39 crc kubenswrapper[4753]: [-]has-synced failed: reason withheld Jan 29 12:08:39 crc kubenswrapper[4753]: [+]process-running ok Jan 29 12:08:39 crc kubenswrapper[4753]: healthz check failed Jan 29 12:08:39 crc kubenswrapper[4753]: I0129 12:08:39.143401 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-jpxd9" podUID="8e54e1ee-a1b8-454c-8b09-e252ca3fe9da" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 29 12:08:39 crc kubenswrapper[4753]: I0129 12:08:39.143623 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-86kqx" Jan 29 12:08:39 crc kubenswrapper[4753]: I0129 12:08:39.252108 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-t89jd" Jan 29 12:08:39 crc kubenswrapper[4753]: I0129 12:08:39.252799 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:39 crc kubenswrapper[4753]: I0129 12:08:39.253221 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-62gc5\" (UniqueName: \"kubernetes.io/projected/574faad5-0a82-4ff3-b0a8-5390bfd3dc27-kube-api-access-62gc5\") pod \"certified-operators-5clxb\" (UID: \"574faad5-0a82-4ff3-b0a8-5390bfd3dc27\") " pod="openshift-marketplace/certified-operators-5clxb" Jan 29 12:08:39 crc kubenswrapper[4753]: I0129 12:08:39.253318 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/574faad5-0a82-4ff3-b0a8-5390bfd3dc27-catalog-content\") pod \"certified-operators-5clxb\" (UID: \"574faad5-0a82-4ff3-b0a8-5390bfd3dc27\") " pod="openshift-marketplace/certified-operators-5clxb" Jan 29 12:08:39 crc kubenswrapper[4753]: I0129 12:08:39.253456 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/574faad5-0a82-4ff3-b0a8-5390bfd3dc27-utilities\") pod \"certified-operators-5clxb\" (UID: \"574faad5-0a82-4ff3-b0a8-5390bfd3dc27\") " pod="openshift-marketplace/certified-operators-5clxb" Jan 29 12:08:39 crc kubenswrapper[4753]: I0129 12:08:39.216304 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4bpqt" event={"ID":"b6e8630c-f357-4c2e-88d0-45218ea79981","Type":"ContainerStarted","Data":"830cb507cd8e51feba6170d7004eb46940110e0942d3ed7c8e141309cb3719db"} Jan 29 12:08:39 crc kubenswrapper[4753]: I0129 12:08:39.264372 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5clxb"] Jan 29 12:08:39 crc kubenswrapper[4753]: E0129 12:08:39.418292 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:39.918260503 +0000 UTC m=+134.170341958 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:39 crc kubenswrapper[4753]: I0129 12:08:39.418669 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-lswml" podStartSLOduration=100.418638184 podStartE2EDuration="1m40.418638184s" podCreationTimestamp="2026-01-29 12:06:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:08:38.722534637 +0000 UTC m=+132.974616102" watchObservedRunningTime="2026-01-29 12:08:39.418638184 +0000 UTC m=+133.670719639" Jan 29 12:08:39 crc kubenswrapper[4753]: I0129 12:08:39.419317 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:39 crc kubenswrapper[4753]: I0129 12:08:39.419467 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-62gc5\" (UniqueName: \"kubernetes.io/projected/574faad5-0a82-4ff3-b0a8-5390bfd3dc27-kube-api-access-62gc5\") pod \"certified-operators-5clxb\" (UID: \"574faad5-0a82-4ff3-b0a8-5390bfd3dc27\") " pod="openshift-marketplace/certified-operators-5clxb" Jan 29 12:08:39 crc kubenswrapper[4753]: I0129 12:08:39.419535 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/574faad5-0a82-4ff3-b0a8-5390bfd3dc27-catalog-content\") pod \"certified-operators-5clxb\" (UID: \"574faad5-0a82-4ff3-b0a8-5390bfd3dc27\") " pod="openshift-marketplace/certified-operators-5clxb" Jan 29 12:08:39 crc kubenswrapper[4753]: I0129 12:08:39.419768 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-gkf8g" podStartSLOduration=101.419761447 podStartE2EDuration="1m41.419761447s" podCreationTimestamp="2026-01-29 12:06:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:08:38.788129816 +0000 UTC m=+133.040211281" watchObservedRunningTime="2026-01-29 12:08:39.419761447 +0000 UTC m=+133.671842902" Jan 29 12:08:39 crc kubenswrapper[4753]: I0129 12:08:39.420288 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/574faad5-0a82-4ff3-b0a8-5390bfd3dc27-catalog-content\") pod \"certified-operators-5clxb\" (UID: \"574faad5-0a82-4ff3-b0a8-5390bfd3dc27\") " pod="openshift-marketplace/certified-operators-5clxb" Jan 29 12:08:39 crc kubenswrapper[4753]: E0129 12:08:39.420690 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2026-01-29 12:08:39.920677504 +0000 UTC m=+134.172758959 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:39 crc kubenswrapper[4753]: I0129 12:08:39.422035 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn" Jan 29 12:08:39 crc kubenswrapper[4753]: I0129 12:08:39.427463 4753 generic.go:334] "Generic (PLEG): container finished" podID="6912f3bb-d8bc-4dc1-b3f0-9e7e1e8d354a" containerID="7f6fe624e595421c0970172d2df2939901dc13e446071674e7351d6e46f32fcd" exitCode=0 Jan 29 12:08:39 crc kubenswrapper[4753]: I0129 12:08:39.451324 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-p7d5d" event={"ID":"6912f3bb-d8bc-4dc1-b3f0-9e7e1e8d354a","Type":"ContainerDied","Data":"7f6fe624e595421c0970172d2df2939901dc13e446071674e7351d6e46f32fcd"} Jan 29 12:08:39 crc kubenswrapper[4753]: I0129 12:08:39.451684 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-pcxth" Jan 29 12:08:39 crc kubenswrapper[4753]: I0129 12:08:39.633649 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:39 crc kubenswrapper[4753]: I0129 12:08:39.633988 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/574faad5-0a82-4ff3-b0a8-5390bfd3dc27-utilities\") pod \"certified-operators-5clxb\" (UID: \"574faad5-0a82-4ff3-b0a8-5390bfd3dc27\") " pod="openshift-marketplace/certified-operators-5clxb" Jan 29 12:08:39 crc kubenswrapper[4753]: E0129 12:08:39.635347 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:40.13532706 +0000 UTC m=+134.387408505 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:39 crc kubenswrapper[4753]: I0129 12:08:39.636332 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/574faad5-0a82-4ff3-b0a8-5390bfd3dc27-utilities\") pod \"certified-operators-5clxb\" (UID: \"574faad5-0a82-4ff3-b0a8-5390bfd3dc27\") " pod="openshift-marketplace/certified-operators-5clxb" Jan 29 12:08:39 crc kubenswrapper[4753]: I0129 12:08:39.646694 4753 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-zdhjn container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.12:6443/healthz\": dial tcp 10.217.0.12:6443: connect: connection refused" start-of-body= Jan 29 12:08:39 crc kubenswrapper[4753]: I0129 12:08:39.646804 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn" podUID="9728fd7e-6203-4082-9297-2d3fd9e17b74" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.12:6443/healthz\": dial tcp 10.217.0.12:6443: connect: connection refused" Jan 29 12:08:39 crc kubenswrapper[4753]: I0129 12:08:39.647841 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bsxpd" Jan 29 12:08:39 crc kubenswrapper[4753]: I0129 12:08:39.652068 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-62gc5\" (UniqueName: \"kubernetes.io/projected/574faad5-0a82-4ff3-b0a8-5390bfd3dc27-kube-api-access-62gc5\") pod \"certified-operators-5clxb\" (UID: \"574faad5-0a82-4ff3-b0a8-5390bfd3dc27\") " pod="openshift-marketplace/certified-operators-5clxb" Jan 29 12:08:39 crc kubenswrapper[4753]: I0129 12:08:39.770069 4753 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-79sb7 container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.33:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 29 12:08:39 crc kubenswrapper[4753]: I0129 12:08:39.770150 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-79sb7" podUID="40dd9f5b-066f-4400-a4a1-6a7a9eda8c90" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.33:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 29 12:08:39 crc kubenswrapper[4753]: I0129 12:08:39.787023 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-5clxb" Jan 29 12:08:39 crc kubenswrapper[4753]: I0129 12:08:39.788431 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:39 crc kubenswrapper[4753]: I0129 12:08:39.789355 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-gntl4" event={"ID":"c612cdf9-575c-4d80-9a48-93e32d5673ae","Type":"ContainerStarted","Data":"3e4ba5d691c2cfb225ef10647fff3482eff7df512f434e84c645b8098c913ffa"} Jan 29 12:08:39 crc kubenswrapper[4753]: E0129 12:08:39.792427 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:40.292406573 +0000 UTC m=+134.544488028 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:39 crc kubenswrapper[4753]: I0129 12:08:39.797858 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-lpjkw" event={"ID":"471bd5c8-e8c1-4078-8e32-c0c30a1b237b","Type":"ContainerStarted","Data":"94cd9e79a36a558606af3fd32d5cc6447e9527cc294746645022871825d7c32f"} Jan 29 12:08:39 crc kubenswrapper[4753]: I0129 12:08:39.873974 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6b2dk" event={"ID":"7a2fedca-4a36-42ec-a6b6-63fcd2de6c07","Type":"ContainerStarted","Data":"9139dab5393bbcbdab584a03927f6ed4a23393aaa79cc530381a967f5d3b5cbd"} Jan 29 12:08:39 crc kubenswrapper[4753]: I0129 12:08:39.908723 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:39 crc kubenswrapper[4753]: E0129 12:08:39.909406 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:40.40935748 +0000 UTC m=+134.661438935 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:40 crc kubenswrapper[4753]: I0129 12:08:39.997368 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Jan 29 12:08:40 crc kubenswrapper[4753]: I0129 12:08:40.001476 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-b2rb9" event={"ID":"200ffbfe-dff4-45e2-944d-34a3ad56f018","Type":"ContainerStarted","Data":"57e3502968cdee1c7892834df8e019409c473c88421470a379ea0d888bc9c4dc"} Jan 29 12:08:40 crc kubenswrapper[4753]: I0129 12:08:40.001527 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-b2rb9" Jan 29 12:08:40 crc kubenswrapper[4753]: I0129 12:08:40.010550 4753 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-8kz2s container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.26:5443/healthz\": dial tcp 10.217.0.26:5443: connect: connection refused" start-of-body= Jan 29 12:08:40 crc kubenswrapper[4753]: I0129 12:08:40.010628 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-8kz2s" podUID="3cea010a-6482-47b3-9c79-45432b921956" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.26:5443/healthz\": dial tcp 10.217.0.26:5443: connect: connection refused" Jan 29 12:08:40 crc kubenswrapper[4753]: I0129 12:08:40.014864 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:40 crc kubenswrapper[4753]: E0129 12:08:40.018447 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:40.518407744 +0000 UTC m=+134.770489199 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:40 crc kubenswrapper[4753]: I0129 12:08:40.214946 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:40 crc kubenswrapper[4753]: E0129 12:08:40.215486 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:40.715466359 +0000 UTC m=+134.967547814 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:40 crc kubenswrapper[4753]: I0129 12:08:40.221525 4753 patch_prober.go:28] interesting pod/router-default-5444994796-jpxd9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 29 12:08:40 crc kubenswrapper[4753]: [-]has-synced failed: reason withheld Jan 29 12:08:40 crc kubenswrapper[4753]: [+]process-running ok Jan 29 12:08:40 crc kubenswrapper[4753]: healthz check failed Jan 29 12:08:40 crc kubenswrapper[4753]: I0129 12:08:40.221580 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-jpxd9" podUID="8e54e1ee-a1b8-454c-8b09-e252ca3fe9da" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 29 12:08:40 crc kubenswrapper[4753]: I0129 12:08:40.243763 4753 patch_prober.go:28] interesting pod/downloads-7954f5f757-b2rb9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" start-of-body= Jan 29 12:08:40 crc kubenswrapper[4753]: I0129 12:08:40.244239 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-b2rb9" podUID="200ffbfe-dff4-45e2-944d-34a3ad56f018" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" Jan 29 12:08:40 crc kubenswrapper[4753]: I0129 12:08:40.318206 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-b52ds"] Jan 29 12:08:40 crc kubenswrapper[4753]: I0129 12:08:40.319863 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-b52ds" Jan 29 12:08:40 crc kubenswrapper[4753]: I0129 12:08:40.327478 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Jan 29 12:08:40 crc kubenswrapper[4753]: I0129 12:08:40.359326 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:40 crc kubenswrapper[4753]: E0129 12:08:40.359771 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:40.859758475 +0000 UTC m=+135.111839930 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:40 crc kubenswrapper[4753]: I0129 12:08:40.370612 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-dgs2s"] Jan 29 12:08:40 crc kubenswrapper[4753]: I0129 12:08:40.371927 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-dgs2s" Jan 29 12:08:40 crc kubenswrapper[4753]: I0129 12:08:40.394448 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Jan 29 12:08:40 crc kubenswrapper[4753]: I0129 12:08:40.410348 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-b52ds"] Jan 29 12:08:40 crc kubenswrapper[4753]: I0129 12:08:40.483356 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:40 crc kubenswrapper[4753]: I0129 12:08:40.483562 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t7r8k\" (UniqueName: \"kubernetes.io/projected/13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6-kube-api-access-t7r8k\") pod \"redhat-operators-dgs2s\" (UID: \"13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6\") " pod="openshift-marketplace/redhat-operators-dgs2s" Jan 29 12:08:40 crc kubenswrapper[4753]: I0129 12:08:40.483602 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d818fc13-9863-4172-a818-4e01af393842-catalog-content\") pod \"redhat-marketplace-b52ds\" (UID: \"d818fc13-9863-4172-a818-4e01af393842\") " pod="openshift-marketplace/redhat-marketplace-b52ds" Jan 29 12:08:40 crc kubenswrapper[4753]: I0129 12:08:40.483673 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6-catalog-content\") pod \"redhat-operators-dgs2s\" (UID: \"13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6\") " pod="openshift-marketplace/redhat-operators-dgs2s" Jan 29 12:08:40 crc kubenswrapper[4753]: I0129 12:08:40.483731 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2bhr9\" (UniqueName: \"kubernetes.io/projected/d818fc13-9863-4172-a818-4e01af393842-kube-api-access-2bhr9\") pod \"redhat-marketplace-b52ds\" (UID: \"d818fc13-9863-4172-a818-4e01af393842\") " pod="openshift-marketplace/redhat-marketplace-b52ds" Jan 29 12:08:40 crc kubenswrapper[4753]: I0129 12:08:40.483786 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d818fc13-9863-4172-a818-4e01af393842-utilities\") pod \"redhat-marketplace-b52ds\" (UID: \"d818fc13-9863-4172-a818-4e01af393842\") " pod="openshift-marketplace/redhat-marketplace-b52ds" Jan 29 12:08:40 crc kubenswrapper[4753]: I0129 12:08:40.483891 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6-utilities\") pod \"redhat-operators-dgs2s\" (UID: \"13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6\") " pod="openshift-marketplace/redhat-operators-dgs2s" Jan 29 12:08:40 crc kubenswrapper[4753]: E0129 12:08:40.484035 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 
podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:40.984014727 +0000 UTC m=+135.236096182 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:40 crc kubenswrapper[4753]: I0129 12:08:40.596172 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t7r8k\" (UniqueName: \"kubernetes.io/projected/13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6-kube-api-access-t7r8k\") pod \"redhat-operators-dgs2s\" (UID: \"13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6\") " pod="openshift-marketplace/redhat-operators-dgs2s" Jan 29 12:08:40 crc kubenswrapper[4753]: I0129 12:08:40.596250 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d818fc13-9863-4172-a818-4e01af393842-catalog-content\") pod \"redhat-marketplace-b52ds\" (UID: \"d818fc13-9863-4172-a818-4e01af393842\") " pod="openshift-marketplace/redhat-marketplace-b52ds" Jan 29 12:08:40 crc kubenswrapper[4753]: I0129 12:08:40.596389 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6-catalog-content\") pod \"redhat-operators-dgs2s\" (UID: \"13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6\") " pod="openshift-marketplace/redhat-operators-dgs2s" Jan 29 12:08:40 crc kubenswrapper[4753]: I0129 12:08:40.596417 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2bhr9\" (UniqueName: \"kubernetes.io/projected/d818fc13-9863-4172-a818-4e01af393842-kube-api-access-2bhr9\") pod \"redhat-marketplace-b52ds\" (UID: \"d818fc13-9863-4172-a818-4e01af393842\") " pod="openshift-marketplace/redhat-marketplace-b52ds" Jan 29 12:08:40 crc kubenswrapper[4753]: I0129 12:08:40.596482 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d818fc13-9863-4172-a818-4e01af393842-utilities\") pod \"redhat-marketplace-b52ds\" (UID: \"d818fc13-9863-4172-a818-4e01af393842\") " pod="openshift-marketplace/redhat-marketplace-b52ds" Jan 29 12:08:40 crc kubenswrapper[4753]: I0129 12:08:40.596549 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:40 crc kubenswrapper[4753]: I0129 12:08:40.596641 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6-utilities\") pod \"redhat-operators-dgs2s\" (UID: \"13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6\") " pod="openshift-marketplace/redhat-operators-dgs2s" Jan 29 12:08:40 crc kubenswrapper[4753]: I0129 12:08:40.601919 4753 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6-utilities\") pod \"redhat-operators-dgs2s\" (UID: \"13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6\") " pod="openshift-marketplace/redhat-operators-dgs2s" Jan 29 12:08:40 crc kubenswrapper[4753]: I0129 12:08:40.604519 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d818fc13-9863-4172-a818-4e01af393842-catalog-content\") pod \"redhat-marketplace-b52ds\" (UID: \"d818fc13-9863-4172-a818-4e01af393842\") " pod="openshift-marketplace/redhat-marketplace-b52ds" Jan 29 12:08:40 crc kubenswrapper[4753]: I0129 12:08:40.604842 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6-catalog-content\") pod \"redhat-operators-dgs2s\" (UID: \"13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6\") " pod="openshift-marketplace/redhat-operators-dgs2s" Jan 29 12:08:41 crc kubenswrapper[4753]: I0129 12:08:40.767796 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d818fc13-9863-4172-a818-4e01af393842-utilities\") pod \"redhat-marketplace-b52ds\" (UID: \"d818fc13-9863-4172-a818-4e01af393842\") " pod="openshift-marketplace/redhat-marketplace-b52ds" Jan 29 12:08:41 crc kubenswrapper[4753]: E0129 12:08:40.770462 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:41.270423174 +0000 UTC m=+135.522504629 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:41 crc kubenswrapper[4753]: I0129 12:08:40.773388 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dgs2s"] Jan 29 12:08:41 crc kubenswrapper[4753]: I0129 12:08:40.864067 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-z6xk4"] Jan 29 12:08:41 crc kubenswrapper[4753]: I0129 12:08:40.871340 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:41 crc kubenswrapper[4753]: E0129 12:08:41.090065 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:41.590033762 +0000 UTC m=+135.842115217 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:41 crc kubenswrapper[4753]: I0129 12:08:41.095256 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t7r8k\" (UniqueName: \"kubernetes.io/projected/13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6-kube-api-access-t7r8k\") pod \"redhat-operators-dgs2s\" (UID: \"13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6\") " pod="openshift-marketplace/redhat-operators-dgs2s" Jan 29 12:08:41 crc kubenswrapper[4753]: I0129 12:08:41.097996 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-7dkr6"] Jan 29 12:08:41 crc kubenswrapper[4753]: I0129 12:08:41.100186 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-z6xk4" Jan 29 12:08:41 crc kubenswrapper[4753]: I0129 12:08:41.108894 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2bhr9\" (UniqueName: \"kubernetes.io/projected/d818fc13-9863-4172-a818-4e01af393842-kube-api-access-2bhr9\") pod \"redhat-marketplace-b52ds\" (UID: \"d818fc13-9863-4172-a818-4e01af393842\") " pod="openshift-marketplace/redhat-marketplace-b52ds" Jan 29 12:08:41 crc kubenswrapper[4753]: I0129 12:08:41.114118 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-7dkr6"] Jan 29 12:08:41 crc kubenswrapper[4753]: I0129 12:08:41.114178 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-z6xk4"] Jan 29 12:08:41 crc kubenswrapper[4753]: I0129 12:08:41.114336 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7dkr6" Jan 29 12:08:41 crc kubenswrapper[4753]: I0129 12:08:41.130750 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:41 crc kubenswrapper[4753]: E0129 12:08:41.131418 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:41.631400665 +0000 UTC m=+135.883482120 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:43 crc kubenswrapper[4753]: I0129 12:08:41.642640 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:43 crc kubenswrapper[4753]: E0129 12:08:41.643711 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:42.643672987 +0000 UTC m=+136.895754432 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:43 crc kubenswrapper[4753]: I0129 12:08:41.646078 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-b52ds" Jan 29 12:08:43 crc kubenswrapper[4753]: I0129 12:08:41.651361 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn" podStartSLOduration=104.651325663 podStartE2EDuration="1m44.651325663s" podCreationTimestamp="2026-01-29 12:06:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:08:41.643707998 +0000 UTC m=+135.895789473" watchObservedRunningTime="2026-01-29 12:08:41.651325663 +0000 UTC m=+135.903407118" Jan 29 12:08:43 crc kubenswrapper[4753]: I0129 12:08:41.661661 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn" event={"ID":"9728fd7e-6203-4082-9297-2d3fd9e17b74","Type":"ContainerStarted","Data":"f00710772f44f4304d4013d4a1a5bcd7a1c5f1eef491b88d83a683fe1f1522f1"} Jan 29 12:08:43 crc kubenswrapper[4753]: I0129 12:08:41.663537 4753 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-zdhjn container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.12:6443/healthz\": dial tcp 10.217.0.12:6443: connect: connection refused" start-of-body= Jan 29 12:08:43 crc kubenswrapper[4753]: I0129 12:08:41.663635 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn" podUID="9728fd7e-6203-4082-9297-2d3fd9e17b74" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.12:6443/healthz\": dial tcp 10.217.0.12:6443: connect: connection refused" Jan 29 12:08:43 crc kubenswrapper[4753]: I0129 12:08:42.035276 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:43 crc kubenswrapper[4753]: I0129 12:08:42.035743 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jqg9m\" (UniqueName: \"kubernetes.io/projected/318aa5db-6b19-4efe-8c5d-00fbb4a84b13-kube-api-access-jqg9m\") pod \"redhat-marketplace-7dkr6\" (UID: \"318aa5db-6b19-4efe-8c5d-00fbb4a84b13\") " pod="openshift-marketplace/redhat-marketplace-7dkr6" Jan 29 12:08:43 crc kubenswrapper[4753]: I0129 12:08:42.035826 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d0809beb-ae87-4bf7-aa2d-20dbe819c3cc-catalog-content\") pod \"redhat-operators-z6xk4\" (UID: \"d0809beb-ae87-4bf7-aa2d-20dbe819c3cc\") " pod="openshift-marketplace/redhat-operators-z6xk4" Jan 29 12:08:43 crc kubenswrapper[4753]: I0129 12:08:42.035858 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/318aa5db-6b19-4efe-8c5d-00fbb4a84b13-utilities\") pod \"redhat-marketplace-7dkr6\" (UID: \"318aa5db-6b19-4efe-8c5d-00fbb4a84b13\") " pod="openshift-marketplace/redhat-marketplace-7dkr6" Jan 29 12:08:43 crc kubenswrapper[4753]: I0129 12:08:42.035917 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-gmml4\" (UniqueName: \"kubernetes.io/projected/d0809beb-ae87-4bf7-aa2d-20dbe819c3cc-kube-api-access-gmml4\") pod \"redhat-operators-z6xk4\" (UID: \"d0809beb-ae87-4bf7-aa2d-20dbe819c3cc\") " pod="openshift-marketplace/redhat-operators-z6xk4" Jan 29 12:08:43 crc kubenswrapper[4753]: I0129 12:08:42.035942 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/318aa5db-6b19-4efe-8c5d-00fbb4a84b13-catalog-content\") pod \"redhat-marketplace-7dkr6\" (UID: \"318aa5db-6b19-4efe-8c5d-00fbb4a84b13\") " pod="openshift-marketplace/redhat-marketplace-7dkr6" Jan 29 12:08:43 crc kubenswrapper[4753]: I0129 12:08:42.035977 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d0809beb-ae87-4bf7-aa2d-20dbe819c3cc-utilities\") pod \"redhat-operators-z6xk4\" (UID: \"d0809beb-ae87-4bf7-aa2d-20dbe819c3cc\") " pod="openshift-marketplace/redhat-operators-z6xk4" Jan 29 12:08:43 crc kubenswrapper[4753]: I0129 12:08:42.180305 4753 patch_prober.go:28] interesting pod/router-default-5444994796-jpxd9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 29 12:08:43 crc kubenswrapper[4753]: [-]has-synced failed: reason withheld Jan 29 12:08:43 crc kubenswrapper[4753]: [+]process-running ok Jan 29 12:08:43 crc kubenswrapper[4753]: healthz check failed Jan 29 12:08:43 crc kubenswrapper[4753]: I0129 12:08:42.181090 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-jpxd9" podUID="8e54e1ee-a1b8-454c-8b09-e252ca3fe9da" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 29 12:08:43 crc kubenswrapper[4753]: E0129 12:08:42.175460 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:42.675422846 +0000 UTC m=+136.927504301 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:43 crc kubenswrapper[4753]: I0129 12:08:42.227794 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jqg9m\" (UniqueName: \"kubernetes.io/projected/318aa5db-6b19-4efe-8c5d-00fbb4a84b13-kube-api-access-jqg9m\") pod \"redhat-marketplace-7dkr6\" (UID: \"318aa5db-6b19-4efe-8c5d-00fbb4a84b13\") " pod="openshift-marketplace/redhat-marketplace-7dkr6" Jan 29 12:08:43 crc kubenswrapper[4753]: I0129 12:08:42.227854 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:43 crc kubenswrapper[4753]: I0129 12:08:42.227895 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d0809beb-ae87-4bf7-aa2d-20dbe819c3cc-catalog-content\") pod \"redhat-operators-z6xk4\" (UID: \"d0809beb-ae87-4bf7-aa2d-20dbe819c3cc\") " pod="openshift-marketplace/redhat-operators-z6xk4" Jan 29 12:08:43 crc kubenswrapper[4753]: I0129 12:08:42.227926 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/318aa5db-6b19-4efe-8c5d-00fbb4a84b13-utilities\") pod \"redhat-marketplace-7dkr6\" (UID: \"318aa5db-6b19-4efe-8c5d-00fbb4a84b13\") " pod="openshift-marketplace/redhat-marketplace-7dkr6" Jan 29 12:08:43 crc kubenswrapper[4753]: I0129 12:08:42.227994 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gmml4\" (UniqueName: \"kubernetes.io/projected/d0809beb-ae87-4bf7-aa2d-20dbe819c3cc-kube-api-access-gmml4\") pod \"redhat-operators-z6xk4\" (UID: \"d0809beb-ae87-4bf7-aa2d-20dbe819c3cc\") " pod="openshift-marketplace/redhat-operators-z6xk4" Jan 29 12:08:43 crc kubenswrapper[4753]: I0129 12:08:42.228012 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/318aa5db-6b19-4efe-8c5d-00fbb4a84b13-catalog-content\") pod \"redhat-marketplace-7dkr6\" (UID: \"318aa5db-6b19-4efe-8c5d-00fbb4a84b13\") " pod="openshift-marketplace/redhat-marketplace-7dkr6" Jan 29 12:08:43 crc kubenswrapper[4753]: I0129 12:08:42.228058 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d0809beb-ae87-4bf7-aa2d-20dbe819c3cc-utilities\") pod \"redhat-operators-z6xk4\" (UID: \"d0809beb-ae87-4bf7-aa2d-20dbe819c3cc\") " pod="openshift-marketplace/redhat-operators-z6xk4" Jan 29 12:08:43 crc kubenswrapper[4753]: I0129 12:08:42.228647 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d0809beb-ae87-4bf7-aa2d-20dbe819c3cc-utilities\") pod \"redhat-operators-z6xk4\" (UID: 
\"d0809beb-ae87-4bf7-aa2d-20dbe819c3cc\") " pod="openshift-marketplace/redhat-operators-z6xk4" Jan 29 12:08:43 crc kubenswrapper[4753]: E0129 12:08:42.229404 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:42.729392081 +0000 UTC m=+136.981473536 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:43 crc kubenswrapper[4753]: I0129 12:08:42.229645 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d0809beb-ae87-4bf7-aa2d-20dbe819c3cc-catalog-content\") pod \"redhat-operators-z6xk4\" (UID: \"d0809beb-ae87-4bf7-aa2d-20dbe819c3cc\") " pod="openshift-marketplace/redhat-operators-z6xk4" Jan 29 12:08:43 crc kubenswrapper[4753]: I0129 12:08:42.268747 4753 patch_prober.go:28] interesting pod/router-default-5444994796-jpxd9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 29 12:08:43 crc kubenswrapper[4753]: [-]has-synced failed: reason withheld Jan 29 12:08:43 crc kubenswrapper[4753]: [+]process-running ok Jan 29 12:08:43 crc kubenswrapper[4753]: healthz check failed Jan 29 12:08:43 crc kubenswrapper[4753]: I0129 12:08:42.268821 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-jpxd9" podUID="8e54e1ee-a1b8-454c-8b09-e252ca3fe9da" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 29 12:08:43 crc kubenswrapper[4753]: I0129 12:08:42.279598 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/318aa5db-6b19-4efe-8c5d-00fbb4a84b13-catalog-content\") pod \"redhat-marketplace-7dkr6\" (UID: \"318aa5db-6b19-4efe-8c5d-00fbb4a84b13\") " pod="openshift-marketplace/redhat-marketplace-7dkr6" Jan 29 12:08:43 crc kubenswrapper[4753]: I0129 12:08:42.279598 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/318aa5db-6b19-4efe-8c5d-00fbb4a84b13-utilities\") pod \"redhat-marketplace-7dkr6\" (UID: \"318aa5db-6b19-4efe-8c5d-00fbb4a84b13\") " pod="openshift-marketplace/redhat-marketplace-7dkr6" Jan 29 12:08:43 crc kubenswrapper[4753]: I0129 12:08:42.351952 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gmml4\" (UniqueName: \"kubernetes.io/projected/d0809beb-ae87-4bf7-aa2d-20dbe819c3cc-kube-api-access-gmml4\") pod \"redhat-operators-z6xk4\" (UID: \"d0809beb-ae87-4bf7-aa2d-20dbe819c3cc\") " pod="openshift-marketplace/redhat-operators-z6xk4" Jan 29 12:08:43 crc kubenswrapper[4753]: I0129 12:08:42.365385 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:43 crc kubenswrapper[4753]: E0129 12:08:42.366068 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:42.866048711 +0000 UTC m=+137.118130166 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:43 crc kubenswrapper[4753]: I0129 12:08:42.374879 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jqg9m\" (UniqueName: \"kubernetes.io/projected/318aa5db-6b19-4efe-8c5d-00fbb4a84b13-kube-api-access-jqg9m\") pod \"redhat-marketplace-7dkr6\" (UID: \"318aa5db-6b19-4efe-8c5d-00fbb4a84b13\") " pod="openshift-marketplace/redhat-marketplace-7dkr6" Jan 29 12:08:43 crc kubenswrapper[4753]: I0129 12:08:42.425534 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4bpqt" podStartSLOduration=104.425512399 podStartE2EDuration="1m44.425512399s" podCreationTimestamp="2026-01-29 12:06:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:08:42.422058777 +0000 UTC m=+136.674140232" watchObservedRunningTime="2026-01-29 12:08:42.425512399 +0000 UTC m=+136.677593854" Jan 29 12:08:43 crc kubenswrapper[4753]: I0129 12:08:42.474531 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:43 crc kubenswrapper[4753]: E0129 12:08:42.475340 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:42.975319681 +0000 UTC m=+137.227401136 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:43 crc kubenswrapper[4753]: I0129 12:08:42.538277 4753 patch_prober.go:28] interesting pod/downloads-7954f5f757-b2rb9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" start-of-body= Jan 29 12:08:43 crc kubenswrapper[4753]: I0129 12:08:42.538692 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-b2rb9" podUID="200ffbfe-dff4-45e2-944d-34a3ad56f018" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" Jan 29 12:08:43 crc kubenswrapper[4753]: I0129 12:08:42.539693 4753 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-lswml container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.38:8080/healthz\": dial tcp 10.217.0.38:8080: connect: connection refused" start-of-body= Jan 29 12:08:43 crc kubenswrapper[4753]: I0129 12:08:42.539732 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-lswml" podUID="dad90295-65db-470c-8041-19fcf86d0439" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.38:8080/healthz\": dial tcp 10.217.0.38:8080: connect: connection refused" Jan 29 12:08:43 crc kubenswrapper[4753]: I0129 12:08:42.559903 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-779px" podStartSLOduration=103.559873081 podStartE2EDuration="1m43.559873081s" podCreationTimestamp="2026-01-29 12:06:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:08:42.47629608 +0000 UTC m=+136.728377535" watchObservedRunningTime="2026-01-29 12:08:42.559873081 +0000 UTC m=+136.811954536" Jan 29 12:08:43 crc kubenswrapper[4753]: I0129 12:08:42.686111 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:43 crc kubenswrapper[4753]: E0129 12:08:42.687263 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:43.187216965 +0000 UTC m=+137.439298420 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:43 crc kubenswrapper[4753]: I0129 12:08:42.794135 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:43 crc kubenswrapper[4753]: E0129 12:08:42.884184 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:43.384150037 +0000 UTC m=+137.636231492 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:43 crc kubenswrapper[4753]: I0129 12:08:42.908247 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:43 crc kubenswrapper[4753]: E0129 12:08:42.909060 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:43.409031482 +0000 UTC m=+137.661112937 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:43 crc kubenswrapper[4753]: I0129 12:08:43.016116 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:43 crc kubenswrapper[4753]: E0129 12:08:43.017018 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:43.516994974 +0000 UTC m=+137.769076429 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:43 crc kubenswrapper[4753]: I0129 12:08:43.120734 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:43 crc kubenswrapper[4753]: E0129 12:08:43.121615 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:43.621583455 +0000 UTC m=+137.873664900 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:43 crc kubenswrapper[4753]: I0129 12:08:43.146585 4753 patch_prober.go:28] interesting pod/router-default-5444994796-jpxd9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 29 12:08:43 crc kubenswrapper[4753]: [-]has-synced failed: reason withheld Jan 29 12:08:43 crc kubenswrapper[4753]: [+]process-running ok Jan 29 12:08:43 crc kubenswrapper[4753]: healthz check failed Jan 29 12:08:43 crc kubenswrapper[4753]: I0129 12:08:43.146671 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-jpxd9" podUID="8e54e1ee-a1b8-454c-8b09-e252ca3fe9da" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 29 12:08:43 crc kubenswrapper[4753]: I0129 12:08:43.177295 4753 patch_prober.go:28] interesting pod/apiserver-76f77b778f-nrt2l container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Jan 29 12:08:43 crc kubenswrapper[4753]: [+]log ok Jan 29 12:08:43 crc kubenswrapper[4753]: [+]etcd ok Jan 29 12:08:43 crc kubenswrapper[4753]: [+]poststarthook/start-apiserver-admission-initializer ok Jan 29 12:08:43 crc kubenswrapper[4753]: [+]poststarthook/generic-apiserver-start-informers ok Jan 29 12:08:43 crc kubenswrapper[4753]: [+]poststarthook/max-in-flight-filter ok Jan 29 12:08:43 crc kubenswrapper[4753]: [+]poststarthook/storage-object-count-tracker-hook ok Jan 29 12:08:43 crc kubenswrapper[4753]: [+]poststarthook/image.openshift.io-apiserver-caches ok Jan 29 12:08:43 crc kubenswrapper[4753]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld Jan 29 12:08:43 crc kubenswrapper[4753]: [-]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa failed: reason withheld Jan 29 12:08:43 crc kubenswrapper[4753]: [+]poststarthook/project.openshift.io-projectcache ok Jan 29 12:08:43 crc kubenswrapper[4753]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok Jan 29 12:08:43 crc kubenswrapper[4753]: [+]poststarthook/openshift.io-startinformers ok Jan 29 12:08:43 crc kubenswrapper[4753]: [+]poststarthook/openshift.io-restmapperupdater ok Jan 29 12:08:43 crc kubenswrapper[4753]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Jan 29 12:08:43 crc kubenswrapper[4753]: livez check failed Jan 29 12:08:43 crc kubenswrapper[4753]: I0129 12:08:43.177356 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-nrt2l" podUID="86659644-dec3-4e1a-ba32-5b4487a2f4c8" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 29 12:08:43 crc kubenswrapper[4753]: I0129 12:08:43.216348 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-j4xlb" podStartSLOduration=104.216320066 
podStartE2EDuration="1m44.216320066s" podCreationTimestamp="2026-01-29 12:06:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:08:43.213045149 +0000 UTC m=+137.465126624" watchObservedRunningTime="2026-01-29 12:08:43.216320066 +0000 UTC m=+137.468401521" Jan 29 12:08:43 crc kubenswrapper[4753]: I0129 12:08:43.217327 4753 patch_prober.go:28] interesting pod/apiserver-76f77b778f-nrt2l container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Jan 29 12:08:43 crc kubenswrapper[4753]: [+]log ok Jan 29 12:08:43 crc kubenswrapper[4753]: [+]etcd ok Jan 29 12:08:43 crc kubenswrapper[4753]: [+]poststarthook/start-apiserver-admission-initializer ok Jan 29 12:08:43 crc kubenswrapper[4753]: [+]poststarthook/generic-apiserver-start-informers ok Jan 29 12:08:43 crc kubenswrapper[4753]: [+]poststarthook/max-in-flight-filter ok Jan 29 12:08:43 crc kubenswrapper[4753]: [+]poststarthook/storage-object-count-tracker-hook ok Jan 29 12:08:43 crc kubenswrapper[4753]: [+]poststarthook/image.openshift.io-apiserver-caches ok Jan 29 12:08:43 crc kubenswrapper[4753]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld Jan 29 12:08:43 crc kubenswrapper[4753]: [-]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa failed: reason withheld Jan 29 12:08:43 crc kubenswrapper[4753]: [+]poststarthook/project.openshift.io-projectcache ok Jan 29 12:08:43 crc kubenswrapper[4753]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok Jan 29 12:08:43 crc kubenswrapper[4753]: [+]poststarthook/openshift.io-startinformers ok Jan 29 12:08:43 crc kubenswrapper[4753]: [+]poststarthook/openshift.io-restmapperupdater ok Jan 29 12:08:43 crc kubenswrapper[4753]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Jan 29 12:08:43 crc kubenswrapper[4753]: livez check failed Jan 29 12:08:43 crc kubenswrapper[4753]: I0129 12:08:43.217413 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-nrt2l" podUID="86659644-dec3-4e1a-ba32-5b4487a2f4c8" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 29 12:08:43 crc kubenswrapper[4753]: I0129 12:08:43.224373 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:43 crc kubenswrapper[4753]: E0129 12:08:43.227356 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:43.727325951 +0000 UTC m=+137.979407486 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:43 crc kubenswrapper[4753]: I0129 12:08:43.347608 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:43 crc kubenswrapper[4753]: E0129 12:08:43.348121 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:43.84804341 +0000 UTC m=+138.100124865 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:43 crc kubenswrapper[4753]: I0129 12:08:43.348324 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:43 crc kubenswrapper[4753]: E0129 12:08:43.348891 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:43.848877385 +0000 UTC m=+138.100958840 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:43 crc kubenswrapper[4753]: I0129 12:08:43.450116 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:43 crc kubenswrapper[4753]: E0129 12:08:43.450550 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:43.950528069 +0000 UTC m=+138.202609524 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:43 crc kubenswrapper[4753]: I0129 12:08:43.694941 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:43 crc kubenswrapper[4753]: E0129 12:08:43.695622 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:44.195603974 +0000 UTC m=+138.447685429 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:43 crc kubenswrapper[4753]: I0129 12:08:43.747321 4753 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-zdhjn container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.12:6443/healthz\": dial tcp 10.217.0.12:6443: connect: connection refused" start-of-body= Jan 29 12:08:43 crc kubenswrapper[4753]: I0129 12:08:43.747366 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn" podUID="9728fd7e-6203-4082-9297-2d3fd9e17b74" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.12:6443/healthz\": dial tcp 10.217.0.12:6443: connect: connection refused" Jan 29 12:08:43 crc kubenswrapper[4753]: I0129 12:08:43.908546 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:43 crc kubenswrapper[4753]: E0129 12:08:43.914860 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:44.414806233 +0000 UTC m=+138.666887688 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:44 crc kubenswrapper[4753]: I0129 12:08:44.017354 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:44 crc kubenswrapper[4753]: E0129 12:08:44.018182 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:44.518139068 +0000 UTC m=+138.770220523 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:44 crc kubenswrapper[4753]: I0129 12:08:44.127597 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:44 crc kubenswrapper[4753]: E0129 12:08:44.136361 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:44.636338062 +0000 UTC m=+138.888419517 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:44 crc kubenswrapper[4753]: I0129 12:08:44.136528 4753 patch_prober.go:28] interesting pod/router-default-5444994796-jpxd9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 29 12:08:44 crc kubenswrapper[4753]: [-]has-synced failed: reason withheld Jan 29 12:08:44 crc kubenswrapper[4753]: [+]process-running ok Jan 29 12:08:44 crc kubenswrapper[4753]: healthz check failed Jan 29 12:08:44 crc kubenswrapper[4753]: I0129 12:08:44.136588 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-jpxd9" podUID="8e54e1ee-a1b8-454c-8b09-e252ca3fe9da" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 29 12:08:44 crc kubenswrapper[4753]: I0129 12:08:44.266555 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:44 crc kubenswrapper[4753]: E0129 12:08:44.275152 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:44.775125415 +0000 UTC m=+139.027206870 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:44 crc kubenswrapper[4753]: E0129 12:08:44.338084 4753 kubelet.go:2526] "Housekeeping took longer than expected" err="housekeeping took too long" expected="1s" actual="2.302s" Jan 29 12:08:44 crc kubenswrapper[4753]: I0129 12:08:44.373938 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:44 crc kubenswrapper[4753]: E0129 12:08:44.376128 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:44.876067409 +0000 UTC m=+139.128148864 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:44 crc kubenswrapper[4753]: I0129 12:08:44.504739 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-p6m5g" podStartSLOduration=106.504708221 podStartE2EDuration="1m46.504708221s" podCreationTimestamp="2026-01-29 12:06:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:08:44.162475485 +0000 UTC m=+138.414556960" watchObservedRunningTime="2026-01-29 12:08:44.504708221 +0000 UTC m=+138.756789666" Jan 29 12:08:44 crc kubenswrapper[4753]: I0129 12:08:44.509382 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:44 crc kubenswrapper[4753]: E0129 12:08:44.510032 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:45.010006868 +0000 UTC m=+139.262088323 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:44 crc kubenswrapper[4753]: I0129 12:08:44.511065 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-b2rb9" podStartSLOduration=106.511044969 podStartE2EDuration="1m46.511044969s" podCreationTimestamp="2026-01-29 12:06:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:08:44.32435143 +0000 UTC m=+138.576432895" watchObservedRunningTime="2026-01-29 12:08:44.511044969 +0000 UTC m=+138.763126424" Jan 29 12:08:44 crc kubenswrapper[4753]: I0129 12:08:44.528642 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dgs2s" Jan 29 12:08:44 crc kubenswrapper[4753]: I0129 12:08:44.718321 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:44 crc kubenswrapper[4753]: E0129 12:08:44.718903 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:45.218879913 +0000 UTC m=+139.470961368 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:44 crc kubenswrapper[4753]: I0129 12:08:44.732782 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-jrblr" Jan 29 12:08:44 crc kubenswrapper[4753]: I0129 12:08:44.733065 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"6110c9b0-6745-4468-b8c6-02277a67883c","Type":"ContainerStarted","Data":"5bb90e4ba3a27532b352b2891a9630fc731f8e5407fb088959cd760e0198b677"} Jan 29 12:08:44 crc kubenswrapper[4753]: I0129 12:08:44.733175 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-p6m5g" event={"ID":"2412fa08-e643-4225-b494-eb999ea93fce","Type":"ContainerStarted","Data":"8fd67de5e84f2051f0bb6bb0960cd6c2890f59410632b3425f0383bbcb72b80d"} Jan 29 12:08:44 crc kubenswrapper[4753]: I0129 12:08:44.733260 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-jrblr" event={"ID":"e63ba05b-c0ce-43bc-b660-f5d93a3c45a5","Type":"ContainerStarted","Data":"adf8abd582d4b8823b61ef3e228181d4836aab875399d997dbbc2a342029f997"} Jan 29 12:08:44 crc kubenswrapper[4753]: I0129 12:08:44.734962 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-jrblr" event={"ID":"e63ba05b-c0ce-43bc-b660-f5d93a3c45a5","Type":"ContainerStarted","Data":"906aefc11de27f36398978367cd52b43857521844e06d9cb98793bcde2253614"} Jan 29 12:08:44 crc kubenswrapper[4753]: I0129 12:08:44.823420 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:44 crc kubenswrapper[4753]: E0129 12:08:44.827786 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:45.327768172 +0000 UTC m=+139.579849627 (durationBeforeRetry 500ms). 
Jan 29 12:08:44 crc kubenswrapper[4753]: I0129 12:08:44.914331 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6b2dk" podStartSLOduration=105.914302819 podStartE2EDuration="1m45.914302819s" podCreationTimestamp="2026-01-29 12:06:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:08:44.56756301 +0000 UTC m=+138.819644485" watchObservedRunningTime="2026-01-29 12:08:44.914302819 +0000 UTC m=+139.166384274"
Jan 29 12:08:44 crc kubenswrapper[4753]: I0129 12:08:44.927441 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 12:08:44 crc kubenswrapper[4753]: E0129 12:08:44.928117 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:45.428080607 +0000 UTC m=+139.680162062 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:45 crc kubenswrapper[4753]: I0129 12:08:45.030667 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:45 crc kubenswrapper[4753]: E0129 12:08:45.031458 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:45.531421022 +0000 UTC m=+139.783502577 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:45 crc kubenswrapper[4753]: I0129 12:08:45.132456 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 12:08:45 crc kubenswrapper[4753]: E0129 12:08:45.135262 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:45.635194619 +0000 UTC m=+139.887276094 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:45 crc kubenswrapper[4753]: I0129 12:08:45.153058 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:45 crc kubenswrapper[4753]: I0129 12:08:45.141447 4753 patch_prober.go:28] interesting pod/router-default-5444994796-jpxd9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 29 12:08:45 crc kubenswrapper[4753]: [-]has-synced failed: reason withheld
Jan 29 12:08:45 crc kubenswrapper[4753]: [+]process-running ok
Jan 29 12:08:45 crc kubenswrapper[4753]: healthz check failed
Jan 29 12:08:45 crc kubenswrapper[4753]: I0129 12:08:45.160966 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-jpxd9" podUID="8e54e1ee-a1b8-454c-8b09-e252ca3fe9da" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 29 12:08:45 crc kubenswrapper[4753]: E0129 12:08:45.178395 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:45.678334303 +0000 UTC m=+139.930415758 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:45 crc kubenswrapper[4753]: I0129 12:08:45.257552 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 12:08:45 crc kubenswrapper[4753]: E0129 12:08:45.258275 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:45.758253236 +0000 UTC m=+140.010334691 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:45 crc kubenswrapper[4753]: I0129 12:08:45.312916 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-z6xk4"
Jan 29 12:08:45 crc kubenswrapper[4753]: I0129 12:08:45.359665 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7dkr6"
Jan 29 12:08:45 crc kubenswrapper[4753]: I0129 12:08:45.375630 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:45 crc kubenswrapper[4753]: E0129 12:08:45.378849 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:45.87882256 +0000 UTC m=+140.130904205 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:45 crc kubenswrapper[4753]: I0129 12:08:45.485040 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-lpjkw" podStartSLOduration=107.484987379 podStartE2EDuration="1m47.484987379s" podCreationTimestamp="2026-01-29 12:06:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:08:44.926584612 +0000 UTC m=+139.178666067" watchObservedRunningTime="2026-01-29 12:08:45.484987379 +0000 UTC m=+139.737068834"
Jan 29 12:08:45 crc kubenswrapper[4753]: I0129 12:08:45.490040 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 12:08:45 crc kubenswrapper[4753]: E0129 12:08:45.491046 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:45.991012827 +0000 UTC m=+140.243094282 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:45 crc kubenswrapper[4753]: I0129 12:08:45.495391 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-jrblr" podStartSLOduration=30.495366055 podStartE2EDuration="30.495366055s" podCreationTimestamp="2026-01-29 12:08:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:08:45.462735861 +0000 UTC m=+139.714817316" watchObservedRunningTime="2026-01-29 12:08:45.495366055 +0000 UTC m=+139.747447520"
Jan 29 12:08:45 crc kubenswrapper[4753]: I0129 12:08:45.593989 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:45 crc kubenswrapper[4753]: E0129 12:08:45.594948 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:46.094919168 +0000 UTC m=+140.347000623 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
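[Annotation] Each failed mount/unmount above is re-queued by the nestedpendingoperations table, which refuses retries until the last failure time plus durationBeforeRetry (a fixed 500ms in every entry here). A minimal sketch of that gating idea, assuming the simplified stand-in type operation (the real code in nestedpendingoperations.go also applies exponential backoff and per-volume keying):

package main

import (
	"fmt"
	"time"
)

// operation is an illustrative stand-in for one pending volume operation.
type operation struct {
	lastErrorTime       time.Time
	durationBeforeRetry time.Duration // 500ms in the log excerpts above
}

// readyToRetry reports whether enough time has passed since the last
// failure; until then the manager logs "No retries permitted until ...".
func (op *operation) readyToRetry(now time.Time) (bool, time.Time) {
	retryAt := op.lastErrorTime.Add(op.durationBeforeRetry)
	return !now.Before(retryAt), retryAt
}

func main() {
	op := &operation{lastErrorTime: time.Now(), durationBeforeRetry: 500 * time.Millisecond}
	if ok, retryAt := op.readyToRetry(time.Now()); !ok {
		fmt.Printf("No retries permitted until %s (durationBeforeRetry %s)\n",
			retryAt.Format("2006-01-02 15:04:05.000000000 -0700 MST"), op.durationBeforeRetry)
	}
	time.Sleep(600 * time.Millisecond)
	if ok, _ := op.readyToRetry(time.Now()); ok {
		fmt.Println("retrying MountVolume.MountDevice")
	}
}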
Jan 29 12:08:45 crc kubenswrapper[4753]: I0129 12:08:45.763152 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 12:08:45 crc kubenswrapper[4753]: E0129 12:08:45.764319 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:46.264253024 +0000 UTC m=+140.516334489 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:45 crc kubenswrapper[4753]: I0129 12:08:45.888735 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:45 crc kubenswrapper[4753]: E0129 12:08:45.889624 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:46.389596199 +0000 UTC m=+140.641677654 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:46 crc kubenswrapper[4753]: I0129 12:08:46.046585 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 12:08:46 crc kubenswrapper[4753]: E0129 12:08:46.047052 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:46.547024923 +0000 UTC m=+140.799106378 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:46 crc kubenswrapper[4753]: I0129 12:08:46.151262 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:46 crc kubenswrapper[4753]: E0129 12:08:46.151825 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:46.65180545 +0000 UTC m=+140.903886905 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:46 crc kubenswrapper[4753]: I0129 12:08:46.282610 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 12:08:46 crc kubenswrapper[4753]: E0129 12:08:46.282974 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:46.782955957 +0000 UTC m=+141.035037412 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:46 crc kubenswrapper[4753]: I0129 12:08:46.311593 4753 patch_prober.go:28] interesting pod/router-default-5444994796-jpxd9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 29 12:08:46 crc kubenswrapper[4753]: [-]has-synced failed: reason withheld
Jan 29 12:08:46 crc kubenswrapper[4753]: [+]process-running ok
Jan 29 12:08:46 crc kubenswrapper[4753]: healthz check failed
Jan 29 12:08:46 crc kubenswrapper[4753]: I0129 12:08:46.311684 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-jpxd9" podUID="8e54e1ee-a1b8-454c-8b09-e252ca3fe9da" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 29 12:08:46 crc kubenswrapper[4753]: I0129 12:08:46.342825 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Jan 29 12:08:46 crc kubenswrapper[4753]: I0129 12:08:46.363559 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5clxb"]
Jan 29 12:08:46 crc kubenswrapper[4753]: I0129 12:08:46.384697 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:46 crc kubenswrapper[4753]: E0129 12:08:46.385651 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:46.885622922 +0000 UTC m=+141.137704377 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:46 crc kubenswrapper[4753]: I0129 12:08:46.486338 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 12:08:46 crc kubenswrapper[4753]: E0129 12:08:46.486765 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:46.986746781 +0000 UTC m=+141.238828236 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:46 crc kubenswrapper[4753]: I0129 12:08:46.629027 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:46 crc kubenswrapper[4753]: E0129 12:08:46.630603 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:47.130570813 +0000 UTC m=+141.382652268 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:46 crc kubenswrapper[4753]: I0129 12:08:46.651320 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-t89jd"]
Jan 29 12:08:46 crc kubenswrapper[4753]: I0129 12:08:46.673109 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-b52ds"]
Jan 29 12:08:46 crc kubenswrapper[4753]: I0129 12:08:46.675590 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bsxpd"]
Jan 29 12:08:46 crc kubenswrapper[4753]: W0129 12:08:46.717363 4753 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd818fc13_9863_4172_a818_4e01af393842.slice/crio-5e5726067fab07574a284c6283182c0aebd5cf06746b30d83cdb1ab5d1846519 WatchSource:0}: Error finding container 5e5726067fab07574a284c6283182c0aebd5cf06746b30d83cdb1ab5d1846519: Status 404 returned error can't find the container with id 5e5726067fab07574a284c6283182c0aebd5cf06746b30d83cdb1ab5d1846519
Jan 29 12:08:46 crc kubenswrapper[4753]: I0129 12:08:46.730970 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 12:08:46 crc kubenswrapper[4753]: E0129 12:08:46.731473 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:47.231451255 +0000 UTC m=+141.483532710 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:46 crc kubenswrapper[4753]: I0129 12:08:46.865918 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:46 crc kubenswrapper[4753]: E0129 12:08:46.877186 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:47.377159152 +0000 UTC m=+141.629240607 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:46 crc kubenswrapper[4753]: I0129 12:08:46.891423 4753 patch_prober.go:28] interesting pod/apiserver-76f77b778f-nrt2l container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok
Jan 29 12:08:46 crc kubenswrapper[4753]: [+]log ok
Jan 29 12:08:46 crc kubenswrapper[4753]: [+]etcd ok
Jan 29 12:08:46 crc kubenswrapper[4753]: [+]poststarthook/start-apiserver-admission-initializer ok
Jan 29 12:08:46 crc kubenswrapper[4753]: [+]poststarthook/generic-apiserver-start-informers ok
Jan 29 12:08:46 crc kubenswrapper[4753]: [+]poststarthook/max-in-flight-filter ok
Jan 29 12:08:46 crc kubenswrapper[4753]: [+]poststarthook/storage-object-count-tracker-hook ok
Jan 29 12:08:46 crc kubenswrapper[4753]: [+]poststarthook/image.openshift.io-apiserver-caches ok
Jan 29 12:08:46 crc kubenswrapper[4753]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld
Jan 29 12:08:46 crc kubenswrapper[4753]: [+]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa ok
Jan 29 12:08:46 crc kubenswrapper[4753]: [+]poststarthook/project.openshift.io-projectcache ok
Jan 29 12:08:46 crc kubenswrapper[4753]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok
Jan 29 12:08:46 crc kubenswrapper[4753]: [+]poststarthook/openshift.io-startinformers ok
Jan 29 12:08:46 crc kubenswrapper[4753]: [+]poststarthook/openshift.io-restmapperupdater ok
Jan 29 12:08:46 crc kubenswrapper[4753]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok
Jan 29 12:08:46 crc kubenswrapper[4753]: livez check failed
Jan 29 12:08:46 crc kubenswrapper[4753]: I0129 12:08:46.891524 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-nrt2l" podUID="86659644-dec3-4e1a-ba32-5b4487a2f4c8" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 29 12:08:46 crc kubenswrapper[4753]: I0129 12:08:46.967104 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 12:08:46 crc kubenswrapper[4753]: E0129 12:08:46.967812 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:47.467790872 +0000 UTC m=+141.719872337 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
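[Annotation] The router and openshift-apiserver entries above are kubelet HTTP probes against healthz-style endpoints: the response body enumerates each sub-check ([+] passed, [-] failed, reasons withheld unless verbose), and the probe fails on a 500 status. A rough sketch of the basic success rule (statuses in [200, 400) pass), under the assumption of a simplified client without the real prober's redirect and header handling:

package main

import (
	"fmt"
	"io"
	"net/http"
	"time"
)

// probeHTTP performs one HTTP probe: status codes in [200, 400) succeed,
// everything else fails. The body is returned so a failure can log the
// per-check "start-of-body" output seen in the entries above.
func probeHTTP(url string, timeout time.Duration) (success bool, body string, err error) {
	client := &http.Client{Timeout: timeout}
	resp, err := client.Get(url)
	if err != nil {
		return false, "", err // e.g. "connect: connection refused"
	}
	defer resp.Body.Close()
	b, _ := io.ReadAll(io.LimitReader(resp.Body, 10*1024))
	return resp.StatusCode >= 200 && resp.StatusCode < 400, string(b), nil
}

func main() {
	// Hypothetical local endpoint; the pods above serve their health
	// checks on cluster-internal addresses, not on localhost.
	ok, body, err := probeHTTP("http://127.0.0.1:8080/healthz", time.Second)
	switch {
	case err != nil:
		fmt.Println("Probe failed:", err)
	case !ok:
		fmt.Printf("HTTP probe failed, start-of-body=%s\n", body)
	default:
		fmt.Println("probe ok")
	}
}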
Jan 29 12:08:47 crc kubenswrapper[4753]: I0129 12:08:47.069524 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:47 crc kubenswrapper[4753]: E0129 12:08:47.070733 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:47.570701384 +0000 UTC m=+141.822782859 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:47 crc kubenswrapper[4753]: I0129 12:08:47.172318 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 12:08:47 crc kubenswrapper[4753]: E0129 12:08:47.173076 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:47.673046369 +0000 UTC m=+141.925127824 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:47 crc kubenswrapper[4753]: I0129 12:08:47.279105 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:47 crc kubenswrapper[4753]: E0129 12:08:47.279669 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:47.77965084 +0000 UTC m=+142.031732295 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:47 crc kubenswrapper[4753]: I0129 12:08:47.343213 4753 patch_prober.go:28] interesting pod/router-default-5444994796-jpxd9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 29 12:08:47 crc kubenswrapper[4753]: [-]has-synced failed: reason withheld
Jan 29 12:08:47 crc kubenswrapper[4753]: [+]process-running ok
Jan 29 12:08:47 crc kubenswrapper[4753]: healthz check failed
Jan 29 12:08:47 crc kubenswrapper[4753]: I0129 12:08:47.343316 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-jpxd9" podUID="8e54e1ee-a1b8-454c-8b09-e252ca3fe9da" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 29 12:08:47 crc kubenswrapper[4753]: I0129 12:08:47.380918 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 12:08:47 crc kubenswrapper[4753]: E0129 12:08:47.381407 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:47.881390918 +0000 UTC m=+142.133472373 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:47 crc kubenswrapper[4753]: I0129 12:08:47.417186 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-pcxth"]
Jan 29 12:08:47 crc kubenswrapper[4753]: I0129 12:08:47.494917 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:47 crc kubenswrapper[4753]: E0129 12:08:47.495731 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:47.995715587 +0000 UTC m=+142.247797042 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:47 crc kubenswrapper[4753]: I0129 12:08:47.521477 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5clxb" event={"ID":"574faad5-0a82-4ff3-b0a8-5390bfd3dc27","Type":"ContainerStarted","Data":"baf50df47fdfe9c1a3c918397fb285aa5e8261643498e8b8b9634d887abe8076"}
Jan 29 12:08:47 crc kubenswrapper[4753]: I0129 12:08:47.544718 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"6110c9b0-6745-4468-b8c6-02277a67883c","Type":"ContainerStarted","Data":"baf3f4eaba33450fb507d82fccd760651af5c5e79d27db808da5cef45c7928dd"}
Jan 29 12:08:47 crc kubenswrapper[4753]: I0129 12:08:47.699090 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 12:08:47 crc kubenswrapper[4753]: E0129 12:08:47.705477 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:48.205434887 +0000 UTC m=+142.457516342 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:47 crc kubenswrapper[4753]: I0129 12:08:47.919159 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:47 crc kubenswrapper[4753]: E0129 12:08:47.919801 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:48.419785103 +0000 UTC m=+142.671866558 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:47 crc kubenswrapper[4753]: I0129 12:08:47.940928 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/revision-pruner-9-crc" podStartSLOduration=15.940906598 podStartE2EDuration="15.940906598s" podCreationTimestamp="2026-01-29 12:08:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:08:47.940858686 +0000 UTC m=+142.192940141" watchObservedRunningTime="2026-01-29 12:08:47.940906598 +0000 UTC m=+142.192988053"
Jan 29 12:08:48 crc kubenswrapper[4753]: I0129 12:08:48.018623 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-gntl4" event={"ID":"c612cdf9-575c-4d80-9a48-93e32d5673ae","Type":"ContainerStarted","Data":"140a6e2c8fefb6c70cf7f1ab6ca10a933125be332f1e3fb642f7f810a2469664"}
Jan 29 12:08:48 crc kubenswrapper[4753]: I0129 12:08:48.018698 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bsxpd" event={"ID":"ae52688b-6f7a-441f-927b-ab547b7ce44f","Type":"ContainerStarted","Data":"cba8692c5f0bccf235c4375147b2e3d5b3415e156cfb7de056059dfbafba39af"}
Jan 29 12:08:48 crc kubenswrapper[4753]: I0129 12:08:48.018709 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"6ab526de-deef-4ec9-9523-95137afadeca","Type":"ContainerStarted","Data":"3d3ab10178160f274d49144985e26f925c1d76f115b03bf3397e6463cf4eef0c"}
Jan 29 12:08:48 crc kubenswrapper[4753]: I0129 12:08:48.018720 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b52ds" event={"ID":"d818fc13-9863-4172-a818-4e01af393842","Type":"ContainerStarted","Data":"5e5726067fab07574a284c6283182c0aebd5cf06746b30d83cdb1ab5d1846519"}
Jan 29 12:08:48 crc kubenswrapper[4753]: I0129 12:08:48.018730 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t89jd" event={"ID":"ce6846a0-6c85-4ae9-afae-b10ead46d21d","Type":"ContainerStarted","Data":"b30f9f9be4fa57fff95b3cd069500f85e18b2e954bc7dd46bc749f91ee31c64c"}
Jan 29 12:08:48 crc kubenswrapper[4753]: I0129 12:08:48.023943 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 12:08:48 crc kubenswrapper[4753]: E0129 12:08:48.024416 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:48.524391626 +0000 UTC m=+142.776473081 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:48 crc kubenswrapper[4753]: I0129 12:08:48.158157 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:48 crc kubenswrapper[4753]: E0129 12:08:48.158533 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:48.65851928 +0000 UTC m=+142.910600735 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:48 crc kubenswrapper[4753]: I0129 12:08:48.172544 4753 patch_prober.go:28] interesting pod/router-default-5444994796-jpxd9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 29 12:08:48 crc kubenswrapper[4753]: [-]has-synced failed: reason withheld
Jan 29 12:08:48 crc kubenswrapper[4753]: [+]process-running ok
Jan 29 12:08:48 crc kubenswrapper[4753]: healthz check failed
Jan 29 12:08:48 crc kubenswrapper[4753]: I0129 12:08:48.172616 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-jpxd9" podUID="8e54e1ee-a1b8-454c-8b09-e252ca3fe9da" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 29 12:08:48 crc kubenswrapper[4753]: I0129 12:08:48.182301 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-p7d5d" event={"ID":"6912f3bb-d8bc-4dc1-b3f0-9e7e1e8d354a","Type":"ContainerStarted","Data":"1c8ec29ff27c4bcbf75d6335e8b87e56000c42d1b75e00bcd03cc57c97daf420"}
Jan 29 12:08:48 crc kubenswrapper[4753]: I0129 12:08:48.318973 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 12:08:48 crc kubenswrapper[4753]: E0129 12:08:48.319788 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:48.819772287 +0000 UTC m=+143.071853742 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:48 crc kubenswrapper[4753]: I0129 12:08:48.379282 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-p7d5d"
Jan 29 12:08:48 crc kubenswrapper[4753]: I0129 12:08:48.379330 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-p7d5d"
Jan 29 12:08:48 crc kubenswrapper[4753]: I0129 12:08:48.382413 4753 patch_prober.go:28] interesting pod/apiserver-7bbb656c7d-p7d5d container/oauth-apiserver namespace/openshift-oauth-apiserver: Startup probe status=failure output="Get \"https://10.217.0.8:8443/livez\": dial tcp 10.217.0.8:8443: connect: connection refused" start-of-body=
Jan 29 12:08:48 crc kubenswrapper[4753]: I0129 12:08:48.382478 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-p7d5d" podUID="6912f3bb-d8bc-4dc1-b3f0-9e7e1e8d354a" containerName="oauth-apiserver" probeResult="failure" output="Get \"https://10.217.0.8:8443/livez\": dial tcp 10.217.0.8:8443: connect: connection refused"
Jan 29 12:08:48 crc kubenswrapper[4753]: I0129 12:08:48.425786 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:48 crc kubenswrapper[4753]: E0129 12:08:48.427567 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:48.927551493 +0000 UTC m=+143.179632948 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:48 crc kubenswrapper[4753]: I0129 12:08:48.683447 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 12:08:48 crc kubenswrapper[4753]: E0129 12:08:48.684018 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:49.183999103 +0000 UTC m=+143.436080558 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:48 crc kubenswrapper[4753]: I0129 12:08:48.719329 4753 patch_prober.go:28] interesting pod/downloads-7954f5f757-b2rb9 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" start-of-body=
Jan 29 12:08:48 crc kubenswrapper[4753]: I0129 12:08:48.719408 4753 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-b2rb9" podUID="200ffbfe-dff4-45e2-944d-34a3ad56f018" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused"
Jan 29 12:08:48 crc kubenswrapper[4753]: I0129 12:08:48.719740 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-jrblr"
Jan 29 12:08:48 crc kubenswrapper[4753]: I0129 12:08:48.721430 4753 patch_prober.go:28] interesting pod/downloads-7954f5f757-b2rb9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" start-of-body=
Jan 29 12:08:48 crc kubenswrapper[4753]: I0129 12:08:48.721459 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-b2rb9" podUID="200ffbfe-dff4-45e2-944d-34a3ad56f018" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused"
Jan 29 12:08:48 crc kubenswrapper[4753]: I0129 12:08:48.721759 4753 patch_prober.go:28] interesting pod/console-f9d7485db-mql86 container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.15:8443/health\": dial tcp 10.217.0.15:8443: connect: connection refused" start-of-body=
Jan 29 12:08:48 crc kubenswrapper[4753]: I0129 12:08:48.721781 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-mql86" podUID="91845c42-ac54-4831-b8ac-73737902b703" containerName="console" probeResult="failure" output="Get \"https://10.217.0.15:8443/health\": dial tcp 10.217.0.15:8443: connect: connection refused"
Jan 29 12:08:48 crc kubenswrapper[4753]: I0129 12:08:48.735643 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-lswml"
Jan 29 12:08:48 crc kubenswrapper[4753]: I0129 12:08:48.749090 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v88cc"
Jan 29 12:08:48 crc kubenswrapper[4753]: I0129 12:08:48.785717 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
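[Annotation] Two distinct probe-failure shapes appear above: the router answers with HTTP 500 (the server is up but reporting itself unhealthy), while downloads, console, and oauth-apiserver refuse the TCP connection outright (nothing is listening on the port yet). A short sketch of telling them apart, assuming a Unix platform for the illustrative syscall.ECONNREFUSED check:

package main

import (
	"errors"
	"fmt"
	"net/http"
	"syscall"
	"time"
)

// classifyProbe distinguishes "listener not up yet" (connection refused)
// from "listener up but unhealthy" (HTTP 5xx), the two failure modes in
// the probe entries above.
func classifyProbe(url string) string {
	client := &http.Client{Timeout: time.Second}
	resp, err := client.Get(url)
	if err != nil {
		if errors.Is(err, syscall.ECONNREFUSED) {
			return "connection refused: container port not listening yet"
		}
		return "transport error: " + err.Error()
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 500 {
		return fmt.Sprintf("server up but unhealthy (status %d)", resp.StatusCode)
	}
	return "healthy"
}

func main() {
	// Hypothetical local address standing in for the pod IPs in the log.
	fmt.Println(classifyProbe("http://127.0.0.1:8080/"))
}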
\"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:48 crc kubenswrapper[4753]: E0129 12:08:48.786413 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:49.28639197 +0000 UTC m=+143.538473425 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:48 crc kubenswrapper[4753]: I0129 12:08:48.976733 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:48 crc kubenswrapper[4753]: E0129 12:08:48.977869 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:49.47784836 +0000 UTC m=+143.729929815 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:49 crc kubenswrapper[4753]: I0129 12:08:49.044530 4753 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock" Jan 29 12:08:49 crc kubenswrapper[4753]: I0129 12:08:49.055220 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-znsp6" Jan 29 12:08:49 crc kubenswrapper[4753]: I0129 12:08:49.101600 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:49 crc kubenswrapper[4753]: E0129 12:08:49.106119 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:49.60608129 +0000 UTC m=+143.858162745 (durationBeforeRetry 500ms). 
Jan 29 12:08:49 crc kubenswrapper[4753]: E0129 12:08:49.106119 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:49.60608129 +0000 UTC m=+143.858162745 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:49 crc kubenswrapper[4753]: I0129 12:08:49.161344 4753 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2026-01-29T12:08:49.044581412Z","Handler":null,"Name":""}
Jan 29 12:08:49 crc kubenswrapper[4753]: I0129 12:08:49.205154 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 12:08:49 crc kubenswrapper[4753]: E0129 12:08:49.207612 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 12:08:49.70756768 +0000 UTC m=+143.959649135 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 12:08:49 crc kubenswrapper[4753]: I0129 12:08:49.208835 4753 patch_prober.go:28] interesting pod/router-default-5444994796-jpxd9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 29 12:08:49 crc kubenswrapper[4753]: [-]has-synced failed: reason withheld
Jan 29 12:08:49 crc kubenswrapper[4753]: [+]process-running ok
Jan 29 12:08:49 crc kubenswrapper[4753]: healthz check failed
Jan 29 12:08:49 crc kubenswrapper[4753]: I0129 12:08:49.208941 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-jpxd9" podUID="8e54e1ee-a1b8-454c-8b09-e252ca3fe9da" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 29 12:08:49 crc kubenswrapper[4753]: I0129 12:08:49.255509 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-8kz2s"
Jan 29 12:08:49 crc kubenswrapper[4753]: I0129 12:08:49.468847 4753 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-zdhjn container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.12:6443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body=
\"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:49 crc kubenswrapper[4753]: E0129 12:08:49.481476 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 12:08:49.981459837 +0000 UTC m=+144.233541302 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w6vx6" (UID: "df61f830-b312-4a01-8d17-057799312936") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 12:08:49 crc kubenswrapper[4753]: I0129 12:08:49.481519 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn" podUID="9728fd7e-6203-4082-9297-2d3fd9e17b74" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.12:6443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 29 12:08:50 crc kubenswrapper[4753]: I0129 12:08:49.512907 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pcxth" event={"ID":"225f75d2-06ff-4a8e-ad48-5fb73aba9a5f","Type":"ContainerStarted","Data":"f007ee8d1dc94fa1e56a9e37455b63b58c720074b701d7e1ca3eb21b5f74b718"} Jan 29 12:08:50 crc kubenswrapper[4753]: I0129 12:08:49.774208 4753 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Jan 29 12:08:50 crc kubenswrapper[4753]: I0129 12:08:49.774285 4753 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Jan 29 12:08:50 crc kubenswrapper[4753]: I0129 12:08:49.775490 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 12:08:50 crc kubenswrapper[4753]: I0129 12:08:49.805832 4753 generic.go:334] "Generic (PLEG): container finished" podID="6110c9b0-6745-4468-b8c6-02277a67883c" containerID="baf3f4eaba33450fb507d82fccd760651af5c5e79d27db808da5cef45c7928dd" exitCode=0 Jan 29 12:08:50 crc kubenswrapper[4753]: I0129 12:08:49.806026 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"6110c9b0-6745-4468-b8c6-02277a67883c","Type":"ContainerDied","Data":"baf3f4eaba33450fb507d82fccd760651af5c5e79d27db808da5cef45c7928dd"} Jan 29 12:08:50 crc kubenswrapper[4753]: I0129 12:08:49.990048 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 29 12:08:50 crc kubenswrapper[4753]: I0129 12:08:49.994238 4753 generic.go:334] "Generic (PLEG): container finished" podID="4f48974c-8787-482a-b962-f7646e12952e" containerID="622532cb2281d0d8e36ef13ffa7a1468db1e0cd54769f22c744d688794ab718f" exitCode=0 Jan 29 12:08:50 crc kubenswrapper[4753]: I0129 12:08:50.086668 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:50 crc kubenswrapper[4753]: I0129 12:08:50.093934 4753 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 29 12:08:50 crc kubenswrapper[4753]: I0129 12:08:50.093978 4753 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:50 crc kubenswrapper[4753]: I0129 12:08:50.126140 4753 patch_prober.go:28] interesting pod/router-default-5444994796-jpxd9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 29 12:08:50 crc kubenswrapper[4753]: [-]has-synced failed: reason withheld Jan 29 12:08:50 crc kubenswrapper[4753]: [+]process-running ok Jan 29 12:08:50 crc kubenswrapper[4753]: healthz check failed Jan 29 12:08:50 crc kubenswrapper[4753]: I0129 12:08:50.126460 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-jpxd9" podUID="8e54e1ee-a1b8-454c-8b09-e252ca3fe9da" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 29 12:08:50 crc kubenswrapper[4753]: I0129 12:08:50.128651 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Jan 29 12:08:50 crc kubenswrapper[4753]: I0129 12:08:50.129502 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494800-86kw2" event={"ID":"4f48974c-8787-482a-b962-f7646e12952e","Type":"ContainerDied","Data":"622532cb2281d0d8e36ef13ffa7a1468db1e0cd54769f22c744d688794ab718f"} Jan 29 12:08:50 crc kubenswrapper[4753]: I0129 12:08:50.129552 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" 
event={"ID":"6ab526de-deef-4ec9-9523-95137afadeca","Type":"ContainerStarted","Data":"16968dfcec67617214cc77dd9363f662bbdacf15c299011f19c7a572a71aef17"} Jan 29 12:08:50 crc kubenswrapper[4753]: I0129 12:08:50.444404 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dgs2s"] Jan 29 12:08:50 crc kubenswrapper[4753]: I0129 12:08:50.507532 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-z6xk4"] Jan 29 12:08:51 crc kubenswrapper[4753]: I0129 12:08:51.031753 4753 generic.go:334] "Generic (PLEG): container finished" podID="d818fc13-9863-4172-a818-4e01af393842" containerID="33edee0526bd35ce082b98012b2a099d3dac7fdf927c548b0acc82f4b3237602" exitCode=0 Jan 29 12:08:51 crc kubenswrapper[4753]: I0129 12:08:51.032340 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b52ds" event={"ID":"d818fc13-9863-4172-a818-4e01af393842","Type":"ContainerDied","Data":"33edee0526bd35ce082b98012b2a099d3dac7fdf927c548b0acc82f4b3237602"} Jan 29 12:08:51 crc kubenswrapper[4753]: I0129 12:08:51.036543 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-z6xk4" event={"ID":"d0809beb-ae87-4bf7-aa2d-20dbe819c3cc","Type":"ContainerStarted","Data":"cfe310ce50ed328fbce042aaec73f6fc4aefee5317448fb747418fd8b4431eaf"} Jan 29 12:08:51 crc kubenswrapper[4753]: I0129 12:08:51.045073 4753 generic.go:334] "Generic (PLEG): container finished" podID="225f75d2-06ff-4a8e-ad48-5fb73aba9a5f" containerID="4e5f7ed97e213c66f79f0331b2c0bcb3c9f1f860e531af2ef4c7ae4165b2eb97" exitCode=0 Jan 29 12:08:51 crc kubenswrapper[4753]: I0129 12:08:51.045181 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pcxth" event={"ID":"225f75d2-06ff-4a8e-ad48-5fb73aba9a5f","Type":"ContainerDied","Data":"4e5f7ed97e213c66f79f0331b2c0bcb3c9f1f860e531af2ef4c7ae4165b2eb97"} Jan 29 12:08:51 crc kubenswrapper[4753]: I0129 12:08:51.045483 4753 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 29 12:08:51 crc kubenswrapper[4753]: I0129 12:08:51.123922 4753 generic.go:334] "Generic (PLEG): container finished" podID="ce6846a0-6c85-4ae9-afae-b10ead46d21d" containerID="722ecd34f28ac3858447e086a8206c6587de0c3b85a51c6563402fa891748423" exitCode=0 Jan 29 12:08:51 crc kubenswrapper[4753]: I0129 12:08:51.124192 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t89jd" event={"ID":"ce6846a0-6c85-4ae9-afae-b10ead46d21d","Type":"ContainerDied","Data":"722ecd34f28ac3858447e086a8206c6587de0c3b85a51c6563402fa891748423"} Jan 29 12:08:51 crc kubenswrapper[4753]: I0129 12:08:51.126863 4753 patch_prober.go:28] interesting pod/router-default-5444994796-jpxd9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 29 12:08:51 crc kubenswrapper[4753]: [-]has-synced failed: reason withheld Jan 29 12:08:51 crc kubenswrapper[4753]: [+]process-running ok Jan 29 12:08:51 crc kubenswrapper[4753]: healthz check failed Jan 29 12:08:51 crc kubenswrapper[4753]: I0129 12:08:51.126941 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-jpxd9" podUID="8e54e1ee-a1b8-454c-8b09-e252ca3fe9da" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 29 12:08:51 
Jan 29 12:08:51 crc kubenswrapper[4753]: I0129 12:08:51.152552 4753 generic.go:334] "Generic (PLEG): container finished" podID="574faad5-0a82-4ff3-b0a8-5390bfd3dc27" containerID="e0c6201cc9ca175420256e1854850c196a5f5fc6725baaf71ea229bb97deb642" exitCode=0
Jan 29 12:08:51 crc kubenswrapper[4753]: I0129 12:08:51.152636 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5clxb" event={"ID":"574faad5-0a82-4ff3-b0a8-5390bfd3dc27","Type":"ContainerDied","Data":"e0c6201cc9ca175420256e1854850c196a5f5fc6725baaf71ea229bb97deb642"}
Jan 29 12:08:51 crc kubenswrapper[4753]: I0129 12:08:51.211220 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-8-crc" podStartSLOduration=15.211189889 podStartE2EDuration="15.211189889s" podCreationTimestamp="2026-01-29 12:08:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:08:50.975492231 +0000 UTC m=+145.227573686" watchObservedRunningTime="2026-01-29 12:08:51.211189889 +0000 UTC m=+145.463271344"
Jan 29 12:08:51 crc kubenswrapper[4753]: I0129 12:08:51.218829 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-7dkr6"]
Jan 29 12:08:51 crc kubenswrapper[4753]: I0129 12:08:51.250090 4753 generic.go:334] "Generic (PLEG): container finished" podID="ae52688b-6f7a-441f-927b-ab547b7ce44f" containerID="2adff0c5060cee3a66c1e74eec771087277951d3650b517c6a798dca255f979e" exitCode=0
Jan 29 12:08:51 crc kubenswrapper[4753]: I0129 12:08:51.251438 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bsxpd" event={"ID":"ae52688b-6f7a-441f-927b-ab547b7ce44f","Type":"ContainerDied","Data":"2adff0c5060cee3a66c1e74eec771087277951d3650b517c6a798dca255f979e"}
Jan 29 12:08:51 crc kubenswrapper[4753]: I0129 12:08:51.275608 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-gntl4" event={"ID":"c612cdf9-575c-4d80-9a48-93e32d5673ae","Type":"ContainerStarted","Data":"5341089950f467d3d22f50ec1acc6e1de2660fee46b362763a6d563fe1358fb8"}
Jan 29 12:08:51 crc kubenswrapper[4753]: I0129 12:08:51.320020 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w6vx6\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6"
Jan 29 12:08:51 crc kubenswrapper[4753]: I0129 12:08:51.348525 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dgs2s" event={"ID":"13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6","Type":"ContainerStarted","Data":"ce2df2f05c3545fe8420be669b7d215a797e028d4ba03e059c7db276b0b06280"}
Jan 29 12:08:51 crc kubenswrapper[4753]: I0129 12:08:51.798393 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd"
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:51 crc kubenswrapper[4753]: I0129 12:08:51.844069 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-nrt2l" Jan 29 12:08:51 crc kubenswrapper[4753]: I0129 12:08:51.859646 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-p7d5d" podStartSLOduration=112.859612257 podStartE2EDuration="1m52.859612257s" podCreationTimestamp="2026-01-29 12:06:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:08:51.857981028 +0000 UTC m=+146.110062503" watchObservedRunningTime="2026-01-29 12:08:51.859612257 +0000 UTC m=+146.111693712" Jan 29 12:08:51 crc kubenswrapper[4753]: I0129 12:08:51.906613 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-nrt2l" Jan 29 12:08:52 crc kubenswrapper[4753]: I0129 12:08:52.130522 4753 patch_prober.go:28] interesting pod/router-default-5444994796-jpxd9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 29 12:08:52 crc kubenswrapper[4753]: [-]has-synced failed: reason withheld Jan 29 12:08:52 crc kubenswrapper[4753]: [+]process-running ok Jan 29 12:08:52 crc kubenswrapper[4753]: healthz check failed Jan 29 12:08:52 crc kubenswrapper[4753]: I0129 12:08:52.130971 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-jpxd9" podUID="8e54e1ee-a1b8-454c-8b09-e252ca3fe9da" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 29 12:08:52 crc kubenswrapper[4753]: I0129 12:08:52.795181 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7dkr6" event={"ID":"318aa5db-6b19-4efe-8c5d-00fbb4a84b13","Type":"ContainerStarted","Data":"f43d1fdc19a8e98625a8968f7cb0a82276aafd99c8212a9ee92cd8b070de1f47"} Jan 29 12:08:52 crc kubenswrapper[4753]: I0129 12:08:52.811508 4753 generic.go:334] "Generic (PLEG): container finished" podID="6ab526de-deef-4ec9-9523-95137afadeca" containerID="16968dfcec67617214cc77dd9363f662bbdacf15c299011f19c7a572a71aef17" exitCode=0 Jan 29 12:08:52 crc kubenswrapper[4753]: I0129 12:08:52.811598 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"6ab526de-deef-4ec9-9523-95137afadeca","Type":"ContainerDied","Data":"16968dfcec67617214cc77dd9363f662bbdacf15c299011f19c7a572a71aef17"} Jan 29 12:08:52 crc kubenswrapper[4753]: I0129 12:08:52.816745 4753 generic.go:334] "Generic (PLEG): container finished" podID="13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6" containerID="bf1c8d45dc92bd08c960afa54a9a560ff456e1f48c9f29d50cfc4b5575df807c" exitCode=0 Jan 29 12:08:52 crc kubenswrapper[4753]: I0129 12:08:52.817202 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dgs2s" event={"ID":"13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6","Type":"ContainerDied","Data":"bf1c8d45dc92bd08c960afa54a9a560ff456e1f48c9f29d50cfc4b5575df807c"} Jan 29 12:08:52 crc kubenswrapper[4753]: I0129 12:08:52.819468 4753 generic.go:334] "Generic (PLEG): container finished" podID="d0809beb-ae87-4bf7-aa2d-20dbe819c3cc" 
containerID="05fa1781d9365a5911b342bc5dc122c3b11fc628bfd851f08cca687d70e73d40" exitCode=0 Jan 29 12:08:52 crc kubenswrapper[4753]: I0129 12:08:52.821444 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-z6xk4" event={"ID":"d0809beb-ae87-4bf7-aa2d-20dbe819c3cc","Type":"ContainerDied","Data":"05fa1781d9365a5911b342bc5dc122c3b11fc628bfd851f08cca687d70e73d40"} Jan 29 12:08:54 crc kubenswrapper[4753]: I0129 12:08:53.476211 4753 patch_prober.go:28] interesting pod/router-default-5444994796-jpxd9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 29 12:08:54 crc kubenswrapper[4753]: [-]has-synced failed: reason withheld Jan 29 12:08:54 crc kubenswrapper[4753]: [+]process-running ok Jan 29 12:08:54 crc kubenswrapper[4753]: healthz check failed Jan 29 12:08:54 crc kubenswrapper[4753]: I0129 12:08:53.476427 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-jpxd9" podUID="8e54e1ee-a1b8-454c-8b09-e252ca3fe9da" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 29 12:08:54 crc kubenswrapper[4753]: I0129 12:08:54.175755 4753 patch_prober.go:28] interesting pod/router-default-5444994796-jpxd9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 29 12:08:54 crc kubenswrapper[4753]: [-]has-synced failed: reason withheld Jan 29 12:08:54 crc kubenswrapper[4753]: [+]process-running ok Jan 29 12:08:54 crc kubenswrapper[4753]: healthz check failed Jan 29 12:08:54 crc kubenswrapper[4753]: I0129 12:08:54.176167 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-jpxd9" podUID="8e54e1ee-a1b8-454c-8b09-e252ca3fe9da" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 29 12:08:54 crc kubenswrapper[4753]: I0129 12:08:54.271601 4753 generic.go:334] "Generic (PLEG): container finished" podID="318aa5db-6b19-4efe-8c5d-00fbb4a84b13" containerID="cc09a810c4a879ba64353b140c24967cc6702f73a203166a1a4d66e7208f8fb3" exitCode=0 Jan 29 12:08:54 crc kubenswrapper[4753]: I0129 12:08:54.274436 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7dkr6" event={"ID":"318aa5db-6b19-4efe-8c5d-00fbb4a84b13","Type":"ContainerDied","Data":"cc09a810c4a879ba64353b140c24967cc6702f73a203166a1a4d66e7208f8fb3"} Jan 29 12:08:54 crc kubenswrapper[4753]: I0129 12:08:54.343924 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-p7d5d" Jan 29 12:08:54 crc kubenswrapper[4753]: I0129 12:08:54.844152 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-p7d5d" Jan 29 12:08:54 crc kubenswrapper[4753]: I0129 12:08:54.947900 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494800-86kw2" Jan 29 12:08:54 crc kubenswrapper[4753]: I0129 12:08:54.969612 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4f48974c-8787-482a-b962-f7646e12952e-secret-volume\") pod \"4f48974c-8787-482a-b962-f7646e12952e\" (UID: \"4f48974c-8787-482a-b962-f7646e12952e\") " Jan 29 12:08:54 crc kubenswrapper[4753]: I0129 12:08:54.969774 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4f48974c-8787-482a-b962-f7646e12952e-config-volume\") pod \"4f48974c-8787-482a-b962-f7646e12952e\" (UID: \"4f48974c-8787-482a-b962-f7646e12952e\") " Jan 29 12:08:54 crc kubenswrapper[4753]: I0129 12:08:54.969817 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4cwgg\" (UniqueName: \"kubernetes.io/projected/4f48974c-8787-482a-b962-f7646e12952e-kube-api-access-4cwgg\") pod \"4f48974c-8787-482a-b962-f7646e12952e\" (UID: \"4f48974c-8787-482a-b962-f7646e12952e\") " Jan 29 12:08:54 crc kubenswrapper[4753]: I0129 12:08:54.971544 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4f48974c-8787-482a-b962-f7646e12952e-config-volume" (OuterVolumeSpecName: "config-volume") pod "4f48974c-8787-482a-b962-f7646e12952e" (UID: "4f48974c-8787-482a-b962-f7646e12952e"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:08:55 crc kubenswrapper[4753]: I0129 12:08:54.993713 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f48974c-8787-482a-b962-f7646e12952e-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "4f48974c-8787-482a-b962-f7646e12952e" (UID: "4f48974c-8787-482a-b962-f7646e12952e"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:08:55 crc kubenswrapper[4753]: I0129 12:08:54.994451 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4f48974c-8787-482a-b962-f7646e12952e-kube-api-access-4cwgg" (OuterVolumeSpecName: "kube-api-access-4cwgg") pod "4f48974c-8787-482a-b962-f7646e12952e" (UID: "4f48974c-8787-482a-b962-f7646e12952e"). InnerVolumeSpecName "kube-api-access-4cwgg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:08:55 crc kubenswrapper[4753]: I0129 12:08:55.047455 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-86kqx"] Jan 29 12:08:55 crc kubenswrapper[4753]: I0129 12:08:55.047946 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-86kqx" podUID="3628b07e-d247-4487-8095-821bf56656b8" containerName="controller-manager" containerID="cri-o://2268ec3aed83352853aba4c1e7db0f6a129e057add397b1db4b3b469771803ee" gracePeriod=30 Jan 29 12:08:55 crc kubenswrapper[4753]: I0129 12:08:55.076320 4753 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4f48974c-8787-482a-b962-f7646e12952e-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 29 12:08:55 crc kubenswrapper[4753]: I0129 12:08:55.076368 4753 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4f48974c-8787-482a-b962-f7646e12952e-config-volume\") on node \"crc\" DevicePath \"\"" Jan 29 12:08:55 crc kubenswrapper[4753]: I0129 12:08:55.076381 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4cwgg\" (UniqueName: \"kubernetes.io/projected/4f48974c-8787-482a-b962-f7646e12952e-kube-api-access-4cwgg\") on node \"crc\" DevicePath \"\"" Jan 29 12:08:55 crc kubenswrapper[4753]: I0129 12:08:55.122983 4753 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Jan 29 12:08:55 crc kubenswrapper[4753]: I0129 12:08:55.126272 4753 patch_prober.go:28] interesting pod/router-default-5444994796-jpxd9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 29 12:08:55 crc kubenswrapper[4753]: [-]has-synced failed: reason withheld Jan 29 12:08:55 crc kubenswrapper[4753]: [+]process-running ok Jan 29 12:08:55 crc kubenswrapper[4753]: healthz check failed Jan 29 12:08:55 crc kubenswrapper[4753]: I0129 12:08:55.126333 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-jpxd9" podUID="8e54e1ee-a1b8-454c-8b09-e252ca3fe9da" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 29 12:08:55 crc kubenswrapper[4753]: I0129 12:08:55.238817 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-dx8db"] Jan 29 12:08:55 crc kubenswrapper[4753]: I0129 12:08:55.253288 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dx8db" podUID="6a22bc59-6a04-49b4-aa79-225e9571ee71" containerName="route-controller-manager" containerID="cri-o://c65e0e7b24bf4c9f886988311d2280381b0facfcaf1b00f7415658001768b087" gracePeriod=30 Jan 29 12:08:55 crc kubenswrapper[4753]: I0129 12:08:55.307936 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494800-86kw2" event={"ID":"4f48974c-8787-482a-b962-f7646e12952e","Type":"ContainerDied","Data":"aaf1083b3586b0a7b187cdb6a824407ea02b448d1b5d8ddb4236b4b8fe56f942"} Jan 29 12:08:55 crc kubenswrapper[4753]: I0129 12:08:55.308633 4753 pod_container_deletor.go:80] "Container not found in pod's 
containers" containerID="aaf1083b3586b0a7b187cdb6a824407ea02b448d1b5d8ddb4236b4b8fe56f942" Jan 29 12:08:55 crc kubenswrapper[4753]: I0129 12:08:55.308940 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494800-86kw2" Jan 29 12:08:55 crc kubenswrapper[4753]: I0129 12:08:55.340554 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-gntl4" event={"ID":"c612cdf9-575c-4d80-9a48-93e32d5673ae","Type":"ContainerStarted","Data":"d736a41114bd37e5a0fbf5a00113a754e901d4e2f8a8453693ff96e19db29538"} Jan 29 12:08:55 crc kubenswrapper[4753]: I0129 12:08:55.525068 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-w6vx6"] Jan 29 12:08:55 crc kubenswrapper[4753]: I0129 12:08:55.769936 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 29 12:08:55 crc kubenswrapper[4753]: I0129 12:08:55.926474 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 29 12:08:55 crc kubenswrapper[4753]: I0129 12:08:55.933158 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/6110c9b0-6745-4468-b8c6-02277a67883c-kubelet-dir\") pod \"6110c9b0-6745-4468-b8c6-02277a67883c\" (UID: \"6110c9b0-6745-4468-b8c6-02277a67883c\") " Jan 29 12:08:55 crc kubenswrapper[4753]: I0129 12:08:55.933228 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6110c9b0-6745-4468-b8c6-02277a67883c-kube-api-access\") pod \"6110c9b0-6745-4468-b8c6-02277a67883c\" (UID: \"6110c9b0-6745-4468-b8c6-02277a67883c\") " Jan 29 12:08:55 crc kubenswrapper[4753]: I0129 12:08:55.934705 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6110c9b0-6745-4468-b8c6-02277a67883c-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "6110c9b0-6745-4468-b8c6-02277a67883c" (UID: "6110c9b0-6745-4468-b8c6-02277a67883c"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 12:08:55 crc kubenswrapper[4753]: I0129 12:08:55.940578 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6110c9b0-6745-4468-b8c6-02277a67883c-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "6110c9b0-6745-4468-b8c6-02277a67883c" (UID: "6110c9b0-6745-4468-b8c6-02277a67883c"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:08:56 crc kubenswrapper[4753]: I0129 12:08:56.284692 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6ab526de-deef-4ec9-9523-95137afadeca-kube-api-access\") pod \"6ab526de-deef-4ec9-9523-95137afadeca\" (UID: \"6ab526de-deef-4ec9-9523-95137afadeca\") " Jan 29 12:08:56 crc kubenswrapper[4753]: I0129 12:08:56.284890 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/6ab526de-deef-4ec9-9523-95137afadeca-kubelet-dir\") pod \"6ab526de-deef-4ec9-9523-95137afadeca\" (UID: \"6ab526de-deef-4ec9-9523-95137afadeca\") " Jan 29 12:08:56 crc kubenswrapper[4753]: I0129 12:08:56.287383 4753 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/6110c9b0-6745-4468-b8c6-02277a67883c-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 29 12:08:56 crc kubenswrapper[4753]: I0129 12:08:56.287487 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6110c9b0-6745-4468-b8c6-02277a67883c-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 29 12:08:56 crc kubenswrapper[4753]: I0129 12:08:56.287719 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6ab526de-deef-4ec9-9523-95137afadeca-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "6ab526de-deef-4ec9-9523-95137afadeca" (UID: "6ab526de-deef-4ec9-9523-95137afadeca"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 12:08:56 crc kubenswrapper[4753]: I0129 12:08:56.389531 4753 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/6ab526de-deef-4ec9-9523-95137afadeca-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 29 12:08:56 crc kubenswrapper[4753]: I0129 12:08:56.432851 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ab526de-deef-4ec9-9523-95137afadeca-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "6ab526de-deef-4ec9-9523-95137afadeca" (UID: "6ab526de-deef-4ec9-9523-95137afadeca"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:08:56 crc kubenswrapper[4753]: I0129 12:08:56.444531 4753 patch_prober.go:28] interesting pod/router-default-5444994796-jpxd9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 29 12:08:56 crc kubenswrapper[4753]: [-]has-synced failed: reason withheld Jan 29 12:08:56 crc kubenswrapper[4753]: [+]process-running ok Jan 29 12:08:56 crc kubenswrapper[4753]: healthz check failed Jan 29 12:08:56 crc kubenswrapper[4753]: I0129 12:08:56.444615 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-jpxd9" podUID="8e54e1ee-a1b8-454c-8b09-e252ca3fe9da" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 29 12:08:56 crc kubenswrapper[4753]: I0129 12:08:56.508419 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6ab526de-deef-4ec9-9523-95137afadeca-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 29 12:08:56 crc kubenswrapper[4753]: I0129 12:08:56.568529 4753 generic.go:334] "Generic (PLEG): container finished" podID="6a22bc59-6a04-49b4-aa79-225e9571ee71" containerID="c65e0e7b24bf4c9f886988311d2280381b0facfcaf1b00f7415658001768b087" exitCode=0 Jan 29 12:08:56 crc kubenswrapper[4753]: I0129 12:08:56.568629 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dx8db" event={"ID":"6a22bc59-6a04-49b4-aa79-225e9571ee71","Type":"ContainerDied","Data":"c65e0e7b24bf4c9f886988311d2280381b0facfcaf1b00f7415658001768b087"} Jan 29 12:08:56 crc kubenswrapper[4753]: I0129 12:08:56.571199 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"6110c9b0-6745-4468-b8c6-02277a67883c","Type":"ContainerDied","Data":"5bb90e4ba3a27532b352b2891a9630fc731f8e5407fb088959cd760e0198b677"} Jan 29 12:08:56 crc kubenswrapper[4753]: I0129 12:08:56.571808 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 29 12:08:56 crc kubenswrapper[4753]: I0129 12:08:56.585110 4753 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5bb90e4ba3a27532b352b2891a9630fc731f8e5407fb088959cd760e0198b677" Jan 29 12:08:56 crc kubenswrapper[4753]: I0129 12:08:56.591504 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" event={"ID":"df61f830-b312-4a01-8d17-057799312936","Type":"ContainerStarted","Data":"003f507526dc99f895161873c068f031ad3529cf912847135409593600a77d55"} Jan 29 12:08:56 crc kubenswrapper[4753]: I0129 12:08:56.591567 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" event={"ID":"df61f830-b312-4a01-8d17-057799312936","Type":"ContainerStarted","Data":"442a1d8a690b33f4f8314dec6144e50209b7db8e5bc4179bb526d947568780da"} Jan 29 12:08:56 crc kubenswrapper[4753]: I0129 12:08:56.593324 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:08:56 crc kubenswrapper[4753]: I0129 12:08:56.654785 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"6ab526de-deef-4ec9-9523-95137afadeca","Type":"ContainerDied","Data":"3d3ab10178160f274d49144985e26f925c1d76f115b03bf3397e6463cf4eef0c"} Jan 29 12:08:56 crc kubenswrapper[4753]: I0129 12:08:56.655406 4753 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3d3ab10178160f274d49144985e26f925c1d76f115b03bf3397e6463cf4eef0c" Jan 29 12:08:56 crc kubenswrapper[4753]: I0129 12:08:56.655514 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 29 12:08:56 crc kubenswrapper[4753]: I0129 12:08:56.715961 4753 generic.go:334] "Generic (PLEG): container finished" podID="3628b07e-d247-4487-8095-821bf56656b8" containerID="2268ec3aed83352853aba4c1e7db0f6a129e057add397b1db4b3b469771803ee" exitCode=0 Jan 29 12:08:56 crc kubenswrapper[4753]: I0129 12:08:56.721225 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" podStartSLOduration=118.721175386 podStartE2EDuration="1m58.721175386s" podCreationTimestamp="2026-01-29 12:06:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:08:56.718373453 +0000 UTC m=+150.970454928" watchObservedRunningTime="2026-01-29 12:08:56.721175386 +0000 UTC m=+150.973256841" Jan 29 12:08:56 crc kubenswrapper[4753]: I0129 12:08:56.721375 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-86kqx" event={"ID":"3628b07e-d247-4487-8095-821bf56656b8","Type":"ContainerDied","Data":"2268ec3aed83352853aba4c1e7db0f6a129e057add397b1db4b3b469771803ee"} Jan 29 12:08:56 crc kubenswrapper[4753]: I0129 12:08:56.918390 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-gntl4" podStartSLOduration=41.918364675 podStartE2EDuration="41.918364675s" podCreationTimestamp="2026-01-29 12:08:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:08:56.902356352 +0000 UTC m=+151.154437827" watchObservedRunningTime="2026-01-29 12:08:56.918364675 +0000 UTC m=+151.170446130" Jan 29 12:08:57 crc kubenswrapper[4753]: I0129 12:08:57.064652 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-86kqx" Jan 29 12:08:57 crc kubenswrapper[4753]: I0129 12:08:57.127337 4753 patch_prober.go:28] interesting pod/router-default-5444994796-jpxd9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 29 12:08:57 crc kubenswrapper[4753]: [-]has-synced failed: reason withheld Jan 29 12:08:57 crc kubenswrapper[4753]: [+]process-running ok Jan 29 12:08:57 crc kubenswrapper[4753]: healthz check failed Jan 29 12:08:57 crc kubenswrapper[4753]: I0129 12:08:57.127770 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-jpxd9" podUID="8e54e1ee-a1b8-454c-8b09-e252ca3fe9da" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 29 12:08:57 crc kubenswrapper[4753]: I0129 12:08:57.216441 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3628b07e-d247-4487-8095-821bf56656b8-serving-cert\") pod \"3628b07e-d247-4487-8095-821bf56656b8\" (UID: \"3628b07e-d247-4487-8095-821bf56656b8\") " Jan 29 12:08:57 crc kubenswrapper[4753]: I0129 12:08:57.218256 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q4nmh\" (UniqueName: \"kubernetes.io/projected/3628b07e-d247-4487-8095-821bf56656b8-kube-api-access-q4nmh\") pod \"3628b07e-d247-4487-8095-821bf56656b8\" (UID: \"3628b07e-d247-4487-8095-821bf56656b8\") " Jan 29 12:08:57 crc kubenswrapper[4753]: I0129 12:08:57.218351 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3628b07e-d247-4487-8095-821bf56656b8-client-ca\") pod \"3628b07e-d247-4487-8095-821bf56656b8\" (UID: \"3628b07e-d247-4487-8095-821bf56656b8\") " Jan 29 12:08:57 crc kubenswrapper[4753]: I0129 12:08:57.218488 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3628b07e-d247-4487-8095-821bf56656b8-config\") pod \"3628b07e-d247-4487-8095-821bf56656b8\" (UID: \"3628b07e-d247-4487-8095-821bf56656b8\") " Jan 29 12:08:57 crc kubenswrapper[4753]: I0129 12:08:57.218569 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/3628b07e-d247-4487-8095-821bf56656b8-proxy-ca-bundles\") pod \"3628b07e-d247-4487-8095-821bf56656b8\" (UID: \"3628b07e-d247-4487-8095-821bf56656b8\") " Jan 29 12:08:57 crc kubenswrapper[4753]: I0129 12:08:57.234355 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3628b07e-d247-4487-8095-821bf56656b8-config" (OuterVolumeSpecName: "config") pod "3628b07e-d247-4487-8095-821bf56656b8" (UID: "3628b07e-d247-4487-8095-821bf56656b8"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:08:57 crc kubenswrapper[4753]: I0129 12:08:57.238179 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3628b07e-d247-4487-8095-821bf56656b8-client-ca" (OuterVolumeSpecName: "client-ca") pod "3628b07e-d247-4487-8095-821bf56656b8" (UID: "3628b07e-d247-4487-8095-821bf56656b8"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:08:57 crc kubenswrapper[4753]: I0129 12:08:57.243886 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3628b07e-d247-4487-8095-821bf56656b8-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "3628b07e-d247-4487-8095-821bf56656b8" (UID: "3628b07e-d247-4487-8095-821bf56656b8"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:08:57 crc kubenswrapper[4753]: I0129 12:08:57.300332 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3628b07e-d247-4487-8095-821bf56656b8-kube-api-access-q4nmh" (OuterVolumeSpecName: "kube-api-access-q4nmh") pod "3628b07e-d247-4487-8095-821bf56656b8" (UID: "3628b07e-d247-4487-8095-821bf56656b8"). InnerVolumeSpecName "kube-api-access-q4nmh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:08:57 crc kubenswrapper[4753]: I0129 12:08:57.347751 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dx8db" Jan 29 12:08:57 crc kubenswrapper[4753]: I0129 12:08:57.368431 4753 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3628b07e-d247-4487-8095-821bf56656b8-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:08:57 crc kubenswrapper[4753]: I0129 12:08:57.368471 4753 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/3628b07e-d247-4487-8095-821bf56656b8-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 29 12:08:57 crc kubenswrapper[4753]: I0129 12:08:57.368484 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q4nmh\" (UniqueName: \"kubernetes.io/projected/3628b07e-d247-4487-8095-821bf56656b8-kube-api-access-q4nmh\") on node \"crc\" DevicePath \"\"" Jan 29 12:08:57 crc kubenswrapper[4753]: I0129 12:08:57.368492 4753 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3628b07e-d247-4487-8095-821bf56656b8-client-ca\") on node \"crc\" DevicePath \"\"" Jan 29 12:08:57 crc kubenswrapper[4753]: I0129 12:08:57.397708 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3628b07e-d247-4487-8095-821bf56656b8-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "3628b07e-d247-4487-8095-821bf56656b8" (UID: "3628b07e-d247-4487-8095-821bf56656b8"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:08:58 crc kubenswrapper[4753]: I0129 12:08:58.219048 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6a22bc59-6a04-49b4-aa79-225e9571ee71-serving-cert\") pod \"6a22bc59-6a04-49b4-aa79-225e9571ee71\" (UID: \"6a22bc59-6a04-49b4-aa79-225e9571ee71\") " Jan 29 12:08:58 crc kubenswrapper[4753]: I0129 12:08:58.219146 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6a22bc59-6a04-49b4-aa79-225e9571ee71-client-ca\") pod \"6a22bc59-6a04-49b4-aa79-225e9571ee71\" (UID: \"6a22bc59-6a04-49b4-aa79-225e9571ee71\") " Jan 29 12:08:58 crc kubenswrapper[4753]: I0129 12:08:58.219183 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a22bc59-6a04-49b4-aa79-225e9571ee71-config\") pod \"6a22bc59-6a04-49b4-aa79-225e9571ee71\" (UID: \"6a22bc59-6a04-49b4-aa79-225e9571ee71\") " Jan 29 12:08:58 crc kubenswrapper[4753]: I0129 12:08:58.219252 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q4hkh\" (UniqueName: \"kubernetes.io/projected/6a22bc59-6a04-49b4-aa79-225e9571ee71-kube-api-access-q4hkh\") pod \"6a22bc59-6a04-49b4-aa79-225e9571ee71\" (UID: \"6a22bc59-6a04-49b4-aa79-225e9571ee71\") " Jan 29 12:08:58 crc kubenswrapper[4753]: I0129 12:08:58.220448 4753 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3628b07e-d247-4487-8095-821bf56656b8-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 12:08:58 crc kubenswrapper[4753]: I0129 12:08:58.220964 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6a22bc59-6a04-49b4-aa79-225e9571ee71-client-ca" (OuterVolumeSpecName: "client-ca") pod "6a22bc59-6a04-49b4-aa79-225e9571ee71" (UID: "6a22bc59-6a04-49b4-aa79-225e9571ee71"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:08:58 crc kubenswrapper[4753]: I0129 12:08:58.220962 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6a22bc59-6a04-49b4-aa79-225e9571ee71-config" (OuterVolumeSpecName: "config") pod "6a22bc59-6a04-49b4-aa79-225e9571ee71" (UID: "6a22bc59-6a04-49b4-aa79-225e9571ee71"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:08:58 crc kubenswrapper[4753]: I0129 12:08:58.232684 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6a22bc59-6a04-49b4-aa79-225e9571ee71-kube-api-access-q4hkh" (OuterVolumeSpecName: "kube-api-access-q4hkh") pod "6a22bc59-6a04-49b4-aa79-225e9571ee71" (UID: "6a22bc59-6a04-49b4-aa79-225e9571ee71"). InnerVolumeSpecName "kube-api-access-q4hkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:08:58 crc kubenswrapper[4753]: I0129 12:08:58.235230 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6a22bc59-6a04-49b4-aa79-225e9571ee71-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6a22bc59-6a04-49b4-aa79-225e9571ee71" (UID: "6a22bc59-6a04-49b4-aa79-225e9571ee71"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:08:58 crc kubenswrapper[4753]: I0129 12:08:58.235451 4753 patch_prober.go:28] interesting pod/router-default-5444994796-jpxd9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 29 12:08:58 crc kubenswrapper[4753]: [-]has-synced failed: reason withheld Jan 29 12:08:58 crc kubenswrapper[4753]: [+]process-running ok Jan 29 12:08:58 crc kubenswrapper[4753]: healthz check failed Jan 29 12:08:58 crc kubenswrapper[4753]: I0129 12:08:58.235524 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-jpxd9" podUID="8e54e1ee-a1b8-454c-8b09-e252ca3fe9da" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.009925 4753 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6a22bc59-6a04-49b4-aa79-225e9571ee71-client-ca\") on node \"crc\" DevicePath \"\"" Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.010265 4753 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a22bc59-6a04-49b4-aa79-225e9571ee71-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.010280 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q4hkh\" (UniqueName: \"kubernetes.io/projected/6a22bc59-6a04-49b4-aa79-225e9571ee71-kube-api-access-q4hkh\") on node \"crc\" DevicePath \"\"" Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.010293 4753 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6a22bc59-6a04-49b4-aa79-225e9571ee71-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.057195 4753 patch_prober.go:28] interesting pod/console-f9d7485db-mql86 container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.15:8443/health\": dial tcp 10.217.0.15:8443: connect: connection refused" start-of-body= Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.057292 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-mql86" podUID="91845c42-ac54-4831-b8ac-73737902b703" containerName="console" probeResult="failure" output="Get \"https://10.217.0.15:8443/health\": dial tcp 10.217.0.15:8443: connect: connection refused" Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.061352 4753 patch_prober.go:28] interesting pod/downloads-7954f5f757-b2rb9 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" start-of-body= Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.061401 4753 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-b2rb9" podUID="200ffbfe-dff4-45e2-944d-34a3ad56f018" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.061673 4753 patch_prober.go:28] interesting pod/downloads-7954f5f757-b2rb9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get 
\"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" start-of-body= Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.061727 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-b2rb9" podUID="200ffbfe-dff4-45e2-944d-34a3ad56f018" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.080310 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn" Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.098050 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-86kqx" event={"ID":"3628b07e-d247-4487-8095-821bf56656b8","Type":"ContainerDied","Data":"b57d2cb29c67098417afae87af728ea55281aabfc05e533dc00bceea07872650"} Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.098118 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-86kqx" Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.098303 4753 scope.go:117] "RemoveContainer" containerID="2268ec3aed83352853aba4c1e7db0f6a129e057add397b1db4b3b469771803ee" Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.146459 4753 patch_prober.go:28] interesting pod/router-default-5444994796-jpxd9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 29 12:08:59 crc kubenswrapper[4753]: [-]has-synced failed: reason withheld Jan 29 12:08:59 crc kubenswrapper[4753]: [+]process-running ok Jan 29 12:08:59 crc kubenswrapper[4753]: healthz check failed Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.146524 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-jpxd9" podUID="8e54e1ee-a1b8-454c-8b09-e252ca3fe9da" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.147613 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dx8db" Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.148000 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dx8db" event={"ID":"6a22bc59-6a04-49b4-aa79-225e9571ee71","Type":"ContainerDied","Data":"2e4e2110436710137fb70cf18d45e15b968ed1441affd071e5f30e0f02a3b5d4"} Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.282018 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-69cc57b4b8-4x7d6"] Jan 29 12:08:59 crc kubenswrapper[4753]: E0129 12:08:59.282393 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6110c9b0-6745-4468-b8c6-02277a67883c" containerName="pruner" Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.282465 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="6110c9b0-6745-4468-b8c6-02277a67883c" containerName="pruner" Jan 29 12:08:59 crc kubenswrapper[4753]: E0129 12:08:59.282497 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f48974c-8787-482a-b962-f7646e12952e" containerName="collect-profiles" Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.282505 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f48974c-8787-482a-b962-f7646e12952e" containerName="collect-profiles" Jan 29 12:08:59 crc kubenswrapper[4753]: E0129 12:08:59.282526 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3628b07e-d247-4487-8095-821bf56656b8" containerName="controller-manager" Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.282534 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="3628b07e-d247-4487-8095-821bf56656b8" containerName="controller-manager" Jan 29 12:08:59 crc kubenswrapper[4753]: E0129 12:08:59.282544 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ab526de-deef-4ec9-9523-95137afadeca" containerName="pruner" Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.282553 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ab526de-deef-4ec9-9523-95137afadeca" containerName="pruner" Jan 29 12:08:59 crc kubenswrapper[4753]: E0129 12:08:59.282561 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a22bc59-6a04-49b4-aa79-225e9571ee71" containerName="route-controller-manager" Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.282568 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a22bc59-6a04-49b4-aa79-225e9571ee71" containerName="route-controller-manager" Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.282809 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="3628b07e-d247-4487-8095-821bf56656b8" containerName="controller-manager" Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.282844 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a22bc59-6a04-49b4-aa79-225e9571ee71" containerName="route-controller-manager" Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.282856 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="4f48974c-8787-482a-b962-f7646e12952e" containerName="collect-profiles" Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.282864 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="6ab526de-deef-4ec9-9523-95137afadeca" containerName="pruner" Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.282876 4753 memory_manager.go:354] "RemoveStaleState removing 
state" podUID="6110c9b0-6745-4468-b8c6-02277a67883c" containerName="pruner" Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.290097 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-8694c74fd8-qgldq"] Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.295820 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-8694c74fd8-qgldq" Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.301195 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-69cc57b4b8-4x7d6" Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.301803 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-69cc57b4b8-4x7d6"] Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.310509 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.314917 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.315270 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.315607 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.315793 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.315995 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.320560 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.321455 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.321509 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.321680 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.321874 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.322858 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.335319 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-86kqx"] Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.354662 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 29 12:08:59 crc 
kubenswrapper[4753]: I0129 12:08:59.399659 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-8694c74fd8-qgldq"] Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.399774 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-86kqx"] Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.511420 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca-config\") pod \"route-controller-manager-8694c74fd8-qgldq\" (UID: \"6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca\") " pod="openshift-route-controller-manager/route-controller-manager-8694c74fd8-qgldq" Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.512281 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/29a1227f-368f-45d2-a827-8b9e78ed8e5d-client-ca\") pod \"controller-manager-69cc57b4b8-4x7d6\" (UID: \"29a1227f-368f-45d2-a827-8b9e78ed8e5d\") " pod="openshift-controller-manager/controller-manager-69cc57b4b8-4x7d6" Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.512409 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/29a1227f-368f-45d2-a827-8b9e78ed8e5d-proxy-ca-bundles\") pod \"controller-manager-69cc57b4b8-4x7d6\" (UID: \"29a1227f-368f-45d2-a827-8b9e78ed8e5d\") " pod="openshift-controller-manager/controller-manager-69cc57b4b8-4x7d6" Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.512637 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/29a1227f-368f-45d2-a827-8b9e78ed8e5d-config\") pod \"controller-manager-69cc57b4b8-4x7d6\" (UID: \"29a1227f-368f-45d2-a827-8b9e78ed8e5d\") " pod="openshift-controller-manager/controller-manager-69cc57b4b8-4x7d6" Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.512765 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca-client-ca\") pod \"route-controller-manager-8694c74fd8-qgldq\" (UID: \"6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca\") " pod="openshift-route-controller-manager/route-controller-manager-8694c74fd8-qgldq" Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.512943 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/29a1227f-368f-45d2-a827-8b9e78ed8e5d-serving-cert\") pod \"controller-manager-69cc57b4b8-4x7d6\" (UID: \"29a1227f-368f-45d2-a827-8b9e78ed8e5d\") " pod="openshift-controller-manager/controller-manager-69cc57b4b8-4x7d6" Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.513193 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f6nfs\" (UniqueName: \"kubernetes.io/projected/6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca-kube-api-access-f6nfs\") pod \"route-controller-manager-8694c74fd8-qgldq\" (UID: \"6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca\") " pod="openshift-route-controller-manager/route-controller-manager-8694c74fd8-qgldq" Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.513394 4753 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vf7gk\" (UniqueName: \"kubernetes.io/projected/29a1227f-368f-45d2-a827-8b9e78ed8e5d-kube-api-access-vf7gk\") pod \"controller-manager-69cc57b4b8-4x7d6\" (UID: \"29a1227f-368f-45d2-a827-8b9e78ed8e5d\") " pod="openshift-controller-manager/controller-manager-69cc57b4b8-4x7d6" Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.513745 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca-serving-cert\") pod \"route-controller-manager-8694c74fd8-qgldq\" (UID: \"6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca\") " pod="openshift-route-controller-manager/route-controller-manager-8694c74fd8-qgldq" Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.617202 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/29a1227f-368f-45d2-a827-8b9e78ed8e5d-config\") pod \"controller-manager-69cc57b4b8-4x7d6\" (UID: \"29a1227f-368f-45d2-a827-8b9e78ed8e5d\") " pod="openshift-controller-manager/controller-manager-69cc57b4b8-4x7d6" Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.617827 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca-client-ca\") pod \"route-controller-manager-8694c74fd8-qgldq\" (UID: \"6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca\") " pod="openshift-route-controller-manager/route-controller-manager-8694c74fd8-qgldq" Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.617981 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/29a1227f-368f-45d2-a827-8b9e78ed8e5d-serving-cert\") pod \"controller-manager-69cc57b4b8-4x7d6\" (UID: \"29a1227f-368f-45d2-a827-8b9e78ed8e5d\") " pod="openshift-controller-manager/controller-manager-69cc57b4b8-4x7d6" Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.618141 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f6nfs\" (UniqueName: \"kubernetes.io/projected/6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca-kube-api-access-f6nfs\") pod \"route-controller-manager-8694c74fd8-qgldq\" (UID: \"6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca\") " pod="openshift-route-controller-manager/route-controller-manager-8694c74fd8-qgldq" Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.618337 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vf7gk\" (UniqueName: \"kubernetes.io/projected/29a1227f-368f-45d2-a827-8b9e78ed8e5d-kube-api-access-vf7gk\") pod \"controller-manager-69cc57b4b8-4x7d6\" (UID: \"29a1227f-368f-45d2-a827-8b9e78ed8e5d\") " pod="openshift-controller-manager/controller-manager-69cc57b4b8-4x7d6" Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.618468 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca-serving-cert\") pod \"route-controller-manager-8694c74fd8-qgldq\" (UID: \"6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca\") " pod="openshift-route-controller-manager/route-controller-manager-8694c74fd8-qgldq" Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.618628 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"config\" (UniqueName: \"kubernetes.io/configmap/6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca-config\") pod \"route-controller-manager-8694c74fd8-qgldq\" (UID: \"6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca\") " pod="openshift-route-controller-manager/route-controller-manager-8694c74fd8-qgldq" Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.618792 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/29a1227f-368f-45d2-a827-8b9e78ed8e5d-client-ca\") pod \"controller-manager-69cc57b4b8-4x7d6\" (UID: \"29a1227f-368f-45d2-a827-8b9e78ed8e5d\") " pod="openshift-controller-manager/controller-manager-69cc57b4b8-4x7d6" Jan 29 12:08:59 crc kubenswrapper[4753]: I0129 12:08:59.618978 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/29a1227f-368f-45d2-a827-8b9e78ed8e5d-proxy-ca-bundles\") pod \"controller-manager-69cc57b4b8-4x7d6\" (UID: \"29a1227f-368f-45d2-a827-8b9e78ed8e5d\") " pod="openshift-controller-manager/controller-manager-69cc57b4b8-4x7d6" Jan 29 12:09:00 crc kubenswrapper[4753]: I0129 12:09:00.253114 4753 scope.go:117] "RemoveContainer" containerID="c65e0e7b24bf4c9f886988311d2280381b0facfcaf1b00f7415658001768b087" Jan 29 12:09:00 crc kubenswrapper[4753]: I0129 12:09:00.277899 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca-client-ca\") pod \"route-controller-manager-8694c74fd8-qgldq\" (UID: \"6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca\") " pod="openshift-route-controller-manager/route-controller-manager-8694c74fd8-qgldq" Jan 29 12:09:00 crc kubenswrapper[4753]: I0129 12:09:00.752744 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/29a1227f-368f-45d2-a827-8b9e78ed8e5d-config\") pod \"controller-manager-69cc57b4b8-4x7d6\" (UID: \"29a1227f-368f-45d2-a827-8b9e78ed8e5d\") " pod="openshift-controller-manager/controller-manager-69cc57b4b8-4x7d6" Jan 29 12:09:00 crc kubenswrapper[4753]: I0129 12:09:00.783760 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/29a1227f-368f-45d2-a827-8b9e78ed8e5d-proxy-ca-bundles\") pod \"controller-manager-69cc57b4b8-4x7d6\" (UID: \"29a1227f-368f-45d2-a827-8b9e78ed8e5d\") " pod="openshift-controller-manager/controller-manager-69cc57b4b8-4x7d6" Jan 29 12:09:00 crc kubenswrapper[4753]: I0129 12:09:00.843503 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/29a1227f-368f-45d2-a827-8b9e78ed8e5d-serving-cert\") pod \"controller-manager-69cc57b4b8-4x7d6\" (UID: \"29a1227f-368f-45d2-a827-8b9e78ed8e5d\") " pod="openshift-controller-manager/controller-manager-69cc57b4b8-4x7d6" Jan 29 12:09:00 crc kubenswrapper[4753]: I0129 12:09:00.847969 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca-config\") pod \"route-controller-manager-8694c74fd8-qgldq\" (UID: \"6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca\") " pod="openshift-route-controller-manager/route-controller-manager-8694c74fd8-qgldq" Jan 29 12:09:00 crc kubenswrapper[4753]: I0129 12:09:00.849004 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca-serving-cert\") pod \"route-controller-manager-8694c74fd8-qgldq\" (UID: \"6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca\") " pod="openshift-route-controller-manager/route-controller-manager-8694c74fd8-qgldq" Jan 29 12:09:00 crc kubenswrapper[4753]: I0129 12:09:00.875256 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/29a1227f-368f-45d2-a827-8b9e78ed8e5d-client-ca\") pod \"controller-manager-69cc57b4b8-4x7d6\" (UID: \"29a1227f-368f-45d2-a827-8b9e78ed8e5d\") " pod="openshift-controller-manager/controller-manager-69cc57b4b8-4x7d6" Jan 29 12:09:00 crc kubenswrapper[4753]: I0129 12:09:00.918534 4753 patch_prober.go:28] interesting pod/router-default-5444994796-jpxd9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 29 12:09:00 crc kubenswrapper[4753]: [-]has-synced failed: reason withheld Jan 29 12:09:00 crc kubenswrapper[4753]: [+]process-running ok Jan 29 12:09:00 crc kubenswrapper[4753]: healthz check failed Jan 29 12:09:00 crc kubenswrapper[4753]: I0129 12:09:00.918656 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-jpxd9" podUID="8e54e1ee-a1b8-454c-8b09-e252ca3fe9da" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 29 12:09:00 crc kubenswrapper[4753]: I0129 12:09:00.942121 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f6nfs\" (UniqueName: \"kubernetes.io/projected/6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca-kube-api-access-f6nfs\") pod \"route-controller-manager-8694c74fd8-qgldq\" (UID: \"6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca\") " pod="openshift-route-controller-manager/route-controller-manager-8694c74fd8-qgldq" Jan 29 12:09:01 crc kubenswrapper[4753]: I0129 12:09:01.017709 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vf7gk\" (UniqueName: \"kubernetes.io/projected/29a1227f-368f-45d2-a827-8b9e78ed8e5d-kube-api-access-vf7gk\") pod \"controller-manager-69cc57b4b8-4x7d6\" (UID: \"29a1227f-368f-45d2-a827-8b9e78ed8e5d\") " pod="openshift-controller-manager/controller-manager-69cc57b4b8-4x7d6" Jan 29 12:09:01 crc kubenswrapper[4753]: I0129 12:09:01.017935 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-69cc57b4b8-4x7d6" Jan 29 12:09:01 crc kubenswrapper[4753]: I0129 12:09:01.057030 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3628b07e-d247-4487-8095-821bf56656b8" path="/var/lib/kubelet/pods/3628b07e-d247-4487-8095-821bf56656b8/volumes" Jan 29 12:09:01 crc kubenswrapper[4753]: I0129 12:09:01.058164 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-dx8db"] Jan 29 12:09:01 crc kubenswrapper[4753]: I0129 12:09:01.058191 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-dx8db"] Jan 29 12:09:01 crc kubenswrapper[4753]: I0129 12:09:01.129851 4753 patch_prober.go:28] interesting pod/router-default-5444994796-jpxd9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 29 12:09:01 crc kubenswrapper[4753]: [-]has-synced failed: reason withheld Jan 29 12:09:01 crc kubenswrapper[4753]: [+]process-running ok Jan 29 12:09:01 crc kubenswrapper[4753]: healthz check failed Jan 29 12:09:01 crc kubenswrapper[4753]: I0129 12:09:01.129930 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-jpxd9" podUID="8e54e1ee-a1b8-454c-8b09-e252ca3fe9da" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 29 12:09:01 crc kubenswrapper[4753]: I0129 12:09:01.156922 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-8694c74fd8-qgldq" Jan 29 12:09:01 crc kubenswrapper[4753]: I0129 12:09:01.809032 4753 patch_prober.go:28] interesting pod/image-registry-697d97f7c8-w6vx6 container/registry namespace/openshift-image-registry: Liveness probe status=failure output="Get \"https://10.217.0.23:5000/healthz\": dial tcp 10.217.0.23:5000: connect: connection refused" start-of-body= Jan 29 12:09:01 crc kubenswrapper[4753]: I0129 12:09:01.809112 4753 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" podUID="df61f830-b312-4a01-8d17-057799312936" containerName="registry" probeResult="failure" output="Get \"https://10.217.0.23:5000/healthz\": dial tcp 10.217.0.23:5000: connect: connection refused" Jan 29 12:09:02 crc kubenswrapper[4753]: I0129 12:09:02.046613 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6a22bc59-6a04-49b4-aa79-225e9571ee71" path="/var/lib/kubelet/pods/6a22bc59-6a04-49b4-aa79-225e9571ee71/volumes" Jan 29 12:09:02 crc kubenswrapper[4753]: I0129 12:09:02.222082 4753 patch_prober.go:28] interesting pod/router-default-5444994796-jpxd9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 29 12:09:02 crc kubenswrapper[4753]: [-]has-synced failed: reason withheld Jan 29 12:09:02 crc kubenswrapper[4753]: [+]process-running ok Jan 29 12:09:02 crc kubenswrapper[4753]: healthz check failed Jan 29 12:09:02 crc kubenswrapper[4753]: I0129 12:09:02.222161 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-jpxd9" podUID="8e54e1ee-a1b8-454c-8b09-e252ca3fe9da" containerName="router" 
probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 29 12:09:02 crc kubenswrapper[4753]: I0129 12:09:02.687798 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-69cc57b4b8-4x7d6"] Jan 29 12:09:02 crc kubenswrapper[4753]: W0129 12:09:02.746704 4753 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod29a1227f_368f_45d2_a827_8b9e78ed8e5d.slice/crio-ad20baf9cd64287dbad14e1af0effa536a34b9eba35117b113f61b0ce40021e3 WatchSource:0}: Error finding container ad20baf9cd64287dbad14e1af0effa536a34b9eba35117b113f61b0ce40021e3: Status 404 returned error can't find the container with id ad20baf9cd64287dbad14e1af0effa536a34b9eba35117b113f61b0ce40021e3 Jan 29 12:09:03 crc kubenswrapper[4753]: I0129 12:09:03.096837 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-8694c74fd8-qgldq"] Jan 29 12:09:03 crc kubenswrapper[4753]: I0129 12:09:03.185132 4753 patch_prober.go:28] interesting pod/router-default-5444994796-jpxd9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 29 12:09:03 crc kubenswrapper[4753]: [-]has-synced failed: reason withheld Jan 29 12:09:03 crc kubenswrapper[4753]: [+]process-running ok Jan 29 12:09:03 crc kubenswrapper[4753]: healthz check failed Jan 29 12:09:03 crc kubenswrapper[4753]: I0129 12:09:03.185243 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-jpxd9" podUID="8e54e1ee-a1b8-454c-8b09-e252ca3fe9da" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 29 12:09:03 crc kubenswrapper[4753]: W0129 12:09:03.211987 4753 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6ab7c0ec_f1e4_42ec_9b2f_21371d7129ca.slice/crio-e83adfc6cc0652853c8677726661a0a6e916c9dfb9cef421b3520961b2b461af WatchSource:0}: Error finding container e83adfc6cc0652853c8677726661a0a6e916c9dfb9cef421b3520961b2b461af: Status 404 returned error can't find the container with id e83adfc6cc0652853c8677726661a0a6e916c9dfb9cef421b3520961b2b461af Jan 29 12:09:03 crc kubenswrapper[4753]: I0129 12:09:03.311675 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-8694c74fd8-qgldq" event={"ID":"6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca","Type":"ContainerStarted","Data":"e83adfc6cc0652853c8677726661a0a6e916c9dfb9cef421b3520961b2b461af"} Jan 29 12:09:03 crc kubenswrapper[4753]: I0129 12:09:03.316873 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-69cc57b4b8-4x7d6" event={"ID":"29a1227f-368f-45d2-a827-8b9e78ed8e5d","Type":"ContainerStarted","Data":"ad20baf9cd64287dbad14e1af0effa536a34b9eba35117b113f61b0ce40021e3"} Jan 29 12:09:03 crc kubenswrapper[4753]: I0129 12:09:03.399316 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 12:09:03 crc 
kubenswrapper[4753]: I0129 12:09:03.399429 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 12:09:03 crc kubenswrapper[4753]: I0129 12:09:03.402090 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Jan 29 12:09:03 crc kubenswrapper[4753]: I0129 12:09:03.402380 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Jan 29 12:09:03 crc kubenswrapper[4753]: I0129 12:09:03.528834 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 12:09:03 crc kubenswrapper[4753]: I0129 12:09:03.533647 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 12:09:03 crc kubenswrapper[4753]: I0129 12:09:03.749017 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 12:09:05 crc kubenswrapper[4753]: I0129 12:09:05.132477 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 12:09:05 crc kubenswrapper[4753]: I0129 12:09:05.132667 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 12:09:05 crc kubenswrapper[4753]: I0129 12:09:05.165191 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Jan 29 12:09:05 crc kubenswrapper[4753]: I0129 12:09:05.176474 4753 patch_prober.go:28] interesting pod/router-default-5444994796-jpxd9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 29 12:09:05 crc kubenswrapper[4753]: [-]has-synced failed: reason withheld Jan 29 12:09:05 crc kubenswrapper[4753]: [+]process-running ok Jan 29 12:09:05 crc kubenswrapper[4753]: healthz check failed Jan 29 12:09:05 crc kubenswrapper[4753]: I0129 12:09:05.176548 4753 prober.go:107] "Probe failed" probeType="Startup" 
pod="openshift-ingress/router-default-5444994796-jpxd9" podUID="8e54e1ee-a1b8-454c-8b09-e252ca3fe9da" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 29 12:09:05 crc kubenswrapper[4753]: I0129 12:09:05.182753 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Jan 29 12:09:05 crc kubenswrapper[4753]: E0129 12:09:05.214700 4753 kubelet.go:2526] "Housekeeping took longer than expected" err="housekeeping took too long" expected="1s" actual="1.256s" Jan 29 12:09:05 crc kubenswrapper[4753]: I0129 12:09:05.216157 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 12:09:05 crc kubenswrapper[4753]: I0129 12:09:05.217658 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 12:09:05 crc kubenswrapper[4753]: I0129 12:09:05.242922 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-69cc57b4b8-4x7d6" event={"ID":"29a1227f-368f-45d2-a827-8b9e78ed8e5d","Type":"ContainerStarted","Data":"e7d2f0546894f2aad1d7793f73ff8857c12bfa0d2b0882ce799ccc3e00c50cad"} Jan 29 12:09:05 crc kubenswrapper[4753]: I0129 12:09:05.320994 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-69cc57b4b8-4x7d6" Jan 29 12:09:06 crc kubenswrapper[4753]: I0129 12:09:06.019614 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-8694c74fd8-qgldq" Jan 29 12:09:06 crc kubenswrapper[4753]: I0129 12:09:06.031964 4753 patch_prober.go:28] interesting pod/controller-manager-69cc57b4b8-4x7d6 container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.54:8443/healthz\": dial tcp 10.217.0.54:8443: connect: connection refused" start-of-body= Jan 29 12:09:06 crc kubenswrapper[4753]: I0129 12:09:06.032072 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-69cc57b4b8-4x7d6" podUID="29a1227f-368f-45d2-a827-8b9e78ed8e5d" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.54:8443/healthz\": dial tcp 10.217.0.54:8443: connect: connection refused" Jan 29 12:09:06 crc kubenswrapper[4753]: I0129 12:09:06.034327 4753 patch_prober.go:28] interesting pod/route-controller-manager-8694c74fd8-qgldq container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.55:8443/healthz\": dial tcp 10.217.0.55:8443: connect: connection refused" start-of-body= Jan 29 12:09:06 crc kubenswrapper[4753]: I0129 12:09:06.034376 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-8694c74fd8-qgldq" 
podUID="6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.55:8443/healthz\": dial tcp 10.217.0.55:8443: connect: connection refused" Jan 29 12:09:06 crc kubenswrapper[4753]: I0129 12:09:06.091865 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 12:09:06 crc kubenswrapper[4753]: I0129 12:09:06.096856 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 12:09:06 crc kubenswrapper[4753]: I0129 12:09:06.135631 4753 patch_prober.go:28] interesting pod/router-default-5444994796-jpxd9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 29 12:09:06 crc kubenswrapper[4753]: [-]has-synced failed: reason withheld Jan 29 12:09:06 crc kubenswrapper[4753]: [+]process-running ok Jan 29 12:09:06 crc kubenswrapper[4753]: healthz check failed Jan 29 12:09:06 crc kubenswrapper[4753]: I0129 12:09:06.135721 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-jpxd9" podUID="8e54e1ee-a1b8-454c-8b09-e252ca3fe9da" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 29 12:09:06 crc kubenswrapper[4753]: I0129 12:09:06.236605 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-69cc57b4b8-4x7d6" podStartSLOduration=11.236568236 podStartE2EDuration="11.236568236s" podCreationTimestamp="2026-01-29 12:08:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:09:06.131639085 +0000 UTC m=+160.383720540" watchObservedRunningTime="2026-01-29 12:09:06.236568236 +0000 UTC m=+160.488649691" Jan 29 12:09:07 crc kubenswrapper[4753]: I0129 12:09:07.115952 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-8694c74fd8-qgldq" event={"ID":"6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca","Type":"ContainerStarted","Data":"72298778b5d691c735b30123fc8e5f178bf444d3c8a3980f1d0b0843a0da37f9"} Jan 29 12:09:07 crc kubenswrapper[4753]: I0129 12:09:07.156323 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-69cc57b4b8-4x7d6" Jan 29 12:09:07 crc kubenswrapper[4753]: I0129 12:09:07.246537 4753 patch_prober.go:28] interesting pod/router-default-5444994796-jpxd9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 29 12:09:07 crc kubenswrapper[4753]: [+]has-synced ok Jan 29 12:09:07 crc kubenswrapper[4753]: [+]process-running ok Jan 29 12:09:07 crc kubenswrapper[4753]: healthz check failed Jan 29 12:09:07 crc kubenswrapper[4753]: I0129 12:09:07.246639 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-jpxd9" podUID="8e54e1ee-a1b8-454c-8b09-e252ca3fe9da" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 29 12:09:07 crc kubenswrapper[4753]: I0129 12:09:07.289974 4753 pod_startup_latency_tracker.go:104] "Observed pod startup 
duration" pod="openshift-route-controller-manager/route-controller-manager-8694c74fd8-qgldq" podStartSLOduration=12.289947845 podStartE2EDuration="12.289947845s" podCreationTimestamp="2026-01-29 12:08:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:09:06.994751318 +0000 UTC m=+161.246832773" watchObservedRunningTime="2026-01-29 12:09:07.289947845 +0000 UTC m=+161.542029300" Jan 29 12:09:08 crc kubenswrapper[4753]: I0129 12:09:08.118315 4753 patch_prober.go:28] interesting pod/route-controller-manager-8694c74fd8-qgldq container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.55:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 29 12:09:08 crc kubenswrapper[4753]: I0129 12:09:08.118698 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-8694c74fd8-qgldq" podUID="6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.55:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 29 12:09:08 crc kubenswrapper[4753]: I0129 12:09:08.132929 4753 patch_prober.go:28] interesting pod/router-default-5444994796-jpxd9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 29 12:09:08 crc kubenswrapper[4753]: [+]has-synced ok Jan 29 12:09:08 crc kubenswrapper[4753]: [+]process-running ok Jan 29 12:09:08 crc kubenswrapper[4753]: healthz check failed Jan 29 12:09:08 crc kubenswrapper[4753]: I0129 12:09:08.132993 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-jpxd9" podUID="8e54e1ee-a1b8-454c-8b09-e252ca3fe9da" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 29 12:09:08 crc kubenswrapper[4753]: I0129 12:09:08.575885 4753 patch_prober.go:28] interesting pod/downloads-7954f5f757-b2rb9 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" start-of-body= Jan 29 12:09:08 crc kubenswrapper[4753]: I0129 12:09:08.576322 4753 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-b2rb9" podUID="200ffbfe-dff4-45e2-944d-34a3ad56f018" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" Jan 29 12:09:08 crc kubenswrapper[4753]: I0129 12:09:08.576399 4753 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-console/downloads-7954f5f757-b2rb9" Jan 29 12:09:08 crc kubenswrapper[4753]: I0129 12:09:08.577164 4753 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="download-server" containerStatusID={"Type":"cri-o","ID":"57e3502968cdee1c7892834df8e019409c473c88421470a379ea0d888bc9c4dc"} pod="openshift-console/downloads-7954f5f757-b2rb9" containerMessage="Container download-server failed liveness probe, will be restarted" Jan 29 12:09:08 crc kubenswrapper[4753]: I0129 12:09:08.577321 4753 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/downloads-7954f5f757-b2rb9" podUID="200ffbfe-dff4-45e2-944d-34a3ad56f018" containerName="download-server" containerID="cri-o://57e3502968cdee1c7892834df8e019409c473c88421470a379ea0d888bc9c4dc" gracePeriod=2 Jan 29 12:09:08 crc kubenswrapper[4753]: I0129 12:09:08.577825 4753 patch_prober.go:28] interesting pod/downloads-7954f5f757-b2rb9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" start-of-body= Jan 29 12:09:08 crc kubenswrapper[4753]: I0129 12:09:08.577850 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-b2rb9" podUID="200ffbfe-dff4-45e2-944d-34a3ad56f018" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" Jan 29 12:09:08 crc kubenswrapper[4753]: I0129 12:09:08.578140 4753 patch_prober.go:28] interesting pod/downloads-7954f5f757-b2rb9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" start-of-body= Jan 29 12:09:08 crc kubenswrapper[4753]: I0129 12:09:08.578162 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-b2rb9" podUID="200ffbfe-dff4-45e2-944d-34a3ad56f018" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" Jan 29 12:09:08 crc kubenswrapper[4753]: I0129 12:09:08.582644 4753 patch_prober.go:28] interesting pod/console-f9d7485db-mql86 container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.15:8443/health\": dial tcp 10.217.0.15:8443: connect: connection refused" start-of-body= Jan 29 12:09:08 crc kubenswrapper[4753]: I0129 12:09:08.582792 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-mql86" podUID="91845c42-ac54-4831-b8ac-73737902b703" containerName="console" probeResult="failure" output="Get \"https://10.217.0.15:8443/health\": dial tcp 10.217.0.15:8443: connect: connection refused" Jan 29 12:09:08 crc kubenswrapper[4753]: I0129 12:09:08.655637 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-779px" Jan 29 12:09:08 crc kubenswrapper[4753]: I0129 12:09:08.758111 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-8694c74fd8-qgldq" Jan 29 12:09:09 crc kubenswrapper[4753]: I0129 12:09:09.156453 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-jpxd9" Jan 29 12:09:09 crc kubenswrapper[4753]: I0129 12:09:09.170857 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-jpxd9" Jan 29 12:09:09 crc kubenswrapper[4753]: W0129 12:09:09.555712 4753 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5fe485a1_e14f_4c09_b5b9_f252bc42b7e8.slice/crio-451339da7acc0d3cc935ec338be9364668527a5824fc1c4341bb3b4aebbbe93d WatchSource:0}: Error finding 
container 451339da7acc0d3cc935ec338be9364668527a5824fc1c4341bb3b4aebbbe93d: Status 404 returned error can't find the container with id 451339da7acc0d3cc935ec338be9364668527a5824fc1c4341bb3b4aebbbe93d Jan 29 12:09:09 crc kubenswrapper[4753]: I0129 12:09:09.783076 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"451339da7acc0d3cc935ec338be9364668527a5824fc1c4341bb3b4aebbbe93d"} Jan 29 12:09:09 crc kubenswrapper[4753]: I0129 12:09:09.799370 4753 generic.go:334] "Generic (PLEG): container finished" podID="200ffbfe-dff4-45e2-944d-34a3ad56f018" containerID="57e3502968cdee1c7892834df8e019409c473c88421470a379ea0d888bc9c4dc" exitCode=0 Jan 29 12:09:09 crc kubenswrapper[4753]: I0129 12:09:09.800279 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-b2rb9" event={"ID":"200ffbfe-dff4-45e2-944d-34a3ad56f018","Type":"ContainerDied","Data":"57e3502968cdee1c7892834df8e019409c473c88421470a379ea0d888bc9c4dc"} Jan 29 12:09:10 crc kubenswrapper[4753]: W0129 12:09:10.408636 4753 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9d751cbb_f2e2_430d_9754_c882a5e924a5.slice/crio-c25a90da96d1b3c77aef9a2e696a261f8c072e26fe4b088b891d8820b7ee85ae WatchSource:0}: Error finding container c25a90da96d1b3c77aef9a2e696a261f8c072e26fe4b088b891d8820b7ee85ae: Status 404 returned error can't find the container with id c25a90da96d1b3c77aef9a2e696a261f8c072e26fe4b088b891d8820b7ee85ae Jan 29 12:09:10 crc kubenswrapper[4753]: I0129 12:09:10.874200 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"c25a90da96d1b3c77aef9a2e696a261f8c072e26fe4b088b891d8820b7ee85ae"} Jan 29 12:09:10 crc kubenswrapper[4753]: I0129 12:09:10.881895 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"dc2a0ac6ead47ff74e4779c19af56311223f00bf22d1ec2c393a00ea73d964a1"} Jan 29 12:09:11 crc kubenswrapper[4753]: I0129 12:09:11.903276 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-b2rb9" event={"ID":"200ffbfe-dff4-45e2-944d-34a3ad56f018","Type":"ContainerStarted","Data":"ef0121188c254337af0c007914b35d5b2619129cddc2287bb20ea16c8b65b6a3"} Jan 29 12:09:11 crc kubenswrapper[4753]: I0129 12:09:11.904257 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-b2rb9" Jan 29 12:09:11 crc kubenswrapper[4753]: I0129 12:09:11.911525 4753 patch_prober.go:28] interesting pod/downloads-7954f5f757-b2rb9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" start-of-body= Jan 29 12:09:11 crc kubenswrapper[4753]: I0129 12:09:11.911643 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-b2rb9" podUID="200ffbfe-dff4-45e2-944d-34a3ad56f018" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" Jan 29 12:09:11 
crc kubenswrapper[4753]: I0129 12:09:11.927438 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"846ff1454c4017c2ffd6cbd141f5d3af7fcbad80f51a3b83ddb8f760a094feb0"} Jan 29 12:09:12 crc kubenswrapper[4753]: I0129 12:09:12.110558 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-69cc57b4b8-4x7d6"] Jan 29 12:09:12 crc kubenswrapper[4753]: I0129 12:09:12.111973 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-69cc57b4b8-4x7d6" podUID="29a1227f-368f-45d2-a827-8b9e78ed8e5d" containerName="controller-manager" containerID="cri-o://e7d2f0546894f2aad1d7793f73ff8857c12bfa0d2b0882ce799ccc3e00c50cad" gracePeriod=30 Jan 29 12:09:12 crc kubenswrapper[4753]: I0129 12:09:12.150157 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Jan 29 12:09:12 crc kubenswrapper[4753]: I0129 12:09:12.151258 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 29 12:09:12 crc kubenswrapper[4753]: I0129 12:09:12.167660 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Jan 29 12:09:12 crc kubenswrapper[4753]: I0129 12:09:12.167766 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Jan 29 12:09:12 crc kubenswrapper[4753]: I0129 12:09:12.171468 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Jan 29 12:09:12 crc kubenswrapper[4753]: I0129 12:09:12.222996 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-8694c74fd8-qgldq"] Jan 29 12:09:12 crc kubenswrapper[4753]: I0129 12:09:12.223251 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-8694c74fd8-qgldq" podUID="6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca" containerName="route-controller-manager" containerID="cri-o://72298778b5d691c735b30123fc8e5f178bf444d3c8a3980f1d0b0843a0da37f9" gracePeriod=30 Jan 29 12:09:12 crc kubenswrapper[4753]: I0129 12:09:12.239280 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:09:12 crc kubenswrapper[4753]: I0129 12:09:12.315312 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/8939830c-019d-4f94-8242-c2f68f0ecd40-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"8939830c-019d-4f94-8242-c2f68f0ecd40\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 29 12:09:12 crc kubenswrapper[4753]: I0129 12:09:12.315633 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/8939830c-019d-4f94-8242-c2f68f0ecd40-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"8939830c-019d-4f94-8242-c2f68f0ecd40\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 29 12:09:12 crc kubenswrapper[4753]: I0129 12:09:12.519747 4753 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/8939830c-019d-4f94-8242-c2f68f0ecd40-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"8939830c-019d-4f94-8242-c2f68f0ecd40\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 29 12:09:12 crc kubenswrapper[4753]: I0129 12:09:12.519822 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/8939830c-019d-4f94-8242-c2f68f0ecd40-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"8939830c-019d-4f94-8242-c2f68f0ecd40\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 29 12:09:12 crc kubenswrapper[4753]: I0129 12:09:12.519937 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/8939830c-019d-4f94-8242-c2f68f0ecd40-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"8939830c-019d-4f94-8242-c2f68f0ecd40\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 29 12:09:12 crc kubenswrapper[4753]: I0129 12:09:12.562662 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/8939830c-019d-4f94-8242-c2f68f0ecd40-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"8939830c-019d-4f94-8242-c2f68f0ecd40\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 29 12:09:12 crc kubenswrapper[4753]: I0129 12:09:12.819740 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 29 12:09:12 crc kubenswrapper[4753]: I0129 12:09:12.948083 4753 generic.go:334] "Generic (PLEG): container finished" podID="29a1227f-368f-45d2-a827-8b9e78ed8e5d" containerID="e7d2f0546894f2aad1d7793f73ff8857c12bfa0d2b0882ce799ccc3e00c50cad" exitCode=0 Jan 29 12:09:12 crc kubenswrapper[4753]: I0129 12:09:12.948212 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-69cc57b4b8-4x7d6" event={"ID":"29a1227f-368f-45d2-a827-8b9e78ed8e5d","Type":"ContainerDied","Data":"e7d2f0546894f2aad1d7793f73ff8857c12bfa0d2b0882ce799ccc3e00c50cad"} Jan 29 12:09:12 crc kubenswrapper[4753]: I0129 12:09:12.952495 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"ab19294c54b69cdb2300d626eba4e060d38e574d8c015211e19e9877225f00e2"} Jan 29 12:09:12 crc kubenswrapper[4753]: I0129 12:09:12.964948 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"8871fcd817a63f5ef07e38c855a0b541b3051f5cf105f424feaf696ec8ec8c33"} Jan 29 12:09:12 crc kubenswrapper[4753]: I0129 12:09:12.965735 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 12:09:12 crc kubenswrapper[4753]: I0129 12:09:12.969902 4753 generic.go:334] "Generic (PLEG): container finished" podID="6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca" containerID="72298778b5d691c735b30123fc8e5f178bf444d3c8a3980f1d0b0843a0da37f9" exitCode=0 Jan 29 12:09:12 crc kubenswrapper[4753]: I0129 12:09:12.969984 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-route-controller-manager/route-controller-manager-8694c74fd8-qgldq" event={"ID":"6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca","Type":"ContainerDied","Data":"72298778b5d691c735b30123fc8e5f178bf444d3c8a3980f1d0b0843a0da37f9"} Jan 29 12:09:12 crc kubenswrapper[4753]: I0129 12:09:12.971173 4753 patch_prober.go:28] interesting pod/downloads-7954f5f757-b2rb9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" start-of-body= Jan 29 12:09:12 crc kubenswrapper[4753]: I0129 12:09:12.971248 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-b2rb9" podUID="200ffbfe-dff4-45e2-944d-34a3ad56f018" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" Jan 29 12:09:14 crc kubenswrapper[4753]: I0129 12:09:14.453641 4753 patch_prober.go:28] interesting pod/downloads-7954f5f757-b2rb9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" start-of-body= Jan 29 12:09:14 crc kubenswrapper[4753]: I0129 12:09:14.454377 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-b2rb9" podUID="200ffbfe-dff4-45e2-944d-34a3ad56f018" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" Jan 29 12:09:15 crc kubenswrapper[4753]: I0129 12:09:15.066151 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-69cc57b4b8-4x7d6" Jan 29 12:09:15 crc kubenswrapper[4753]: I0129 12:09:15.339307 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/29a1227f-368f-45d2-a827-8b9e78ed8e5d-proxy-ca-bundles\") pod \"29a1227f-368f-45d2-a827-8b9e78ed8e5d\" (UID: \"29a1227f-368f-45d2-a827-8b9e78ed8e5d\") " Jan 29 12:09:15 crc kubenswrapper[4753]: I0129 12:09:15.340805 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/29a1227f-368f-45d2-a827-8b9e78ed8e5d-config\") pod \"29a1227f-368f-45d2-a827-8b9e78ed8e5d\" (UID: \"29a1227f-368f-45d2-a827-8b9e78ed8e5d\") " Jan 29 12:09:15 crc kubenswrapper[4753]: I0129 12:09:15.341061 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/29a1227f-368f-45d2-a827-8b9e78ed8e5d-client-ca\") pod \"29a1227f-368f-45d2-a827-8b9e78ed8e5d\" (UID: \"29a1227f-368f-45d2-a827-8b9e78ed8e5d\") " Jan 29 12:09:15 crc kubenswrapper[4753]: I0129 12:09:15.349215 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/29a1227f-368f-45d2-a827-8b9e78ed8e5d-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "29a1227f-368f-45d2-a827-8b9e78ed8e5d" (UID: "29a1227f-368f-45d2-a827-8b9e78ed8e5d"). InnerVolumeSpecName "proxy-ca-bundles". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:09:15 crc kubenswrapper[4753]: I0129 12:09:15.349552 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/29a1227f-368f-45d2-a827-8b9e78ed8e5d-config" (OuterVolumeSpecName: "config") pod "29a1227f-368f-45d2-a827-8b9e78ed8e5d" (UID: "29a1227f-368f-45d2-a827-8b9e78ed8e5d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:09:15 crc kubenswrapper[4753]: I0129 12:09:15.354193 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/29a1227f-368f-45d2-a827-8b9e78ed8e5d-client-ca" (OuterVolumeSpecName: "client-ca") pod "29a1227f-368f-45d2-a827-8b9e78ed8e5d" (UID: "29a1227f-368f-45d2-a827-8b9e78ed8e5d"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:09:15 crc kubenswrapper[4753]: I0129 12:09:15.405388 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-55788fcfd7-spvgx"] Jan 29 12:09:15 crc kubenswrapper[4753]: E0129 12:09:15.408236 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29a1227f-368f-45d2-a827-8b9e78ed8e5d" containerName="controller-manager" Jan 29 12:09:15 crc kubenswrapper[4753]: I0129 12:09:15.408432 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="29a1227f-368f-45d2-a827-8b9e78ed8e5d" containerName="controller-manager" Jan 29 12:09:15 crc kubenswrapper[4753]: I0129 12:09:15.409465 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="29a1227f-368f-45d2-a827-8b9e78ed8e5d" containerName="controller-manager" Jan 29 12:09:15 crc kubenswrapper[4753]: I0129 12:09:15.412437 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-55788fcfd7-spvgx" Jan 29 12:09:15 crc kubenswrapper[4753]: I0129 12:09:15.447334 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-55788fcfd7-spvgx"] Jan 29 12:09:15 crc kubenswrapper[4753]: I0129 12:09:15.447887 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/29a1227f-368f-45d2-a827-8b9e78ed8e5d-serving-cert\") pod \"29a1227f-368f-45d2-a827-8b9e78ed8e5d\" (UID: \"29a1227f-368f-45d2-a827-8b9e78ed8e5d\") " Jan 29 12:09:15 crc kubenswrapper[4753]: I0129 12:09:15.447961 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vf7gk\" (UniqueName: \"kubernetes.io/projected/29a1227f-368f-45d2-a827-8b9e78ed8e5d-kube-api-access-vf7gk\") pod \"29a1227f-368f-45d2-a827-8b9e78ed8e5d\" (UID: \"29a1227f-368f-45d2-a827-8b9e78ed8e5d\") " Jan 29 12:09:15 crc kubenswrapper[4753]: I0129 12:09:15.448247 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f6w4l\" (UniqueName: \"kubernetes.io/projected/36e98c0d-195d-45f0-9ecb-c83ed362a56b-kube-api-access-f6w4l\") pod \"controller-manager-55788fcfd7-spvgx\" (UID: \"36e98c0d-195d-45f0-9ecb-c83ed362a56b\") " pod="openshift-controller-manager/controller-manager-55788fcfd7-spvgx" Jan 29 12:09:15 crc kubenswrapper[4753]: I0129 12:09:15.448333 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/36e98c0d-195d-45f0-9ecb-c83ed362a56b-config\") pod \"controller-manager-55788fcfd7-spvgx\" (UID: \"36e98c0d-195d-45f0-9ecb-c83ed362a56b\") " pod="openshift-controller-manager/controller-manager-55788fcfd7-spvgx" Jan 29 12:09:15 crc kubenswrapper[4753]: I0129 12:09:15.448365 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/36e98c0d-195d-45f0-9ecb-c83ed362a56b-proxy-ca-bundles\") pod \"controller-manager-55788fcfd7-spvgx\" (UID: \"36e98c0d-195d-45f0-9ecb-c83ed362a56b\") " pod="openshift-controller-manager/controller-manager-55788fcfd7-spvgx" Jan 29 12:09:15 crc kubenswrapper[4753]: I0129 12:09:15.448406 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/36e98c0d-195d-45f0-9ecb-c83ed362a56b-serving-cert\") pod \"controller-manager-55788fcfd7-spvgx\" (UID: \"36e98c0d-195d-45f0-9ecb-c83ed362a56b\") " pod="openshift-controller-manager/controller-manager-55788fcfd7-spvgx" Jan 29 12:09:15 crc kubenswrapper[4753]: I0129 12:09:15.448436 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/36e98c0d-195d-45f0-9ecb-c83ed362a56b-client-ca\") pod \"controller-manager-55788fcfd7-spvgx\" (UID: \"36e98c0d-195d-45f0-9ecb-c83ed362a56b\") " pod="openshift-controller-manager/controller-manager-55788fcfd7-spvgx" Jan 29 12:09:15 crc kubenswrapper[4753]: I0129 12:09:15.449210 4753 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/29a1227f-368f-45d2-a827-8b9e78ed8e5d-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 29 12:09:15 crc kubenswrapper[4753]: I0129 12:09:15.449285 4753 
reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/29a1227f-368f-45d2-a827-8b9e78ed8e5d-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:09:15 crc kubenswrapper[4753]: I0129 12:09:15.449297 4753 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/29a1227f-368f-45d2-a827-8b9e78ed8e5d-client-ca\") on node \"crc\" DevicePath \"\"" Jan 29 12:09:15 crc kubenswrapper[4753]: I0129 12:09:15.470616 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/29a1227f-368f-45d2-a827-8b9e78ed8e5d-kube-api-access-vf7gk" (OuterVolumeSpecName: "kube-api-access-vf7gk") pod "29a1227f-368f-45d2-a827-8b9e78ed8e5d" (UID: "29a1227f-368f-45d2-a827-8b9e78ed8e5d"). InnerVolumeSpecName "kube-api-access-vf7gk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:09:15 crc kubenswrapper[4753]: I0129 12:09:15.472134 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/29a1227f-368f-45d2-a827-8b9e78ed8e5d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "29a1227f-368f-45d2-a827-8b9e78ed8e5d" (UID: "29a1227f-368f-45d2-a827-8b9e78ed8e5d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:09:15 crc kubenswrapper[4753]: I0129 12:09:15.498352 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-69cc57b4b8-4x7d6" Jan 29 12:09:15 crc kubenswrapper[4753]: I0129 12:09:15.500766 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-69cc57b4b8-4x7d6" event={"ID":"29a1227f-368f-45d2-a827-8b9e78ed8e5d","Type":"ContainerDied","Data":"ad20baf9cd64287dbad14e1af0effa536a34b9eba35117b113f61b0ce40021e3"} Jan 29 12:09:15 crc kubenswrapper[4753]: I0129 12:09:15.500889 4753 scope.go:117] "RemoveContainer" containerID="e7d2f0546894f2aad1d7793f73ff8857c12bfa0d2b0882ce799ccc3e00c50cad" Jan 29 12:09:15 crc kubenswrapper[4753]: I0129 12:09:15.515540 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Jan 29 12:09:15 crc kubenswrapper[4753]: I0129 12:09:15.551026 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/36e98c0d-195d-45f0-9ecb-c83ed362a56b-config\") pod \"controller-manager-55788fcfd7-spvgx\" (UID: \"36e98c0d-195d-45f0-9ecb-c83ed362a56b\") " pod="openshift-controller-manager/controller-manager-55788fcfd7-spvgx" Jan 29 12:09:15 crc kubenswrapper[4753]: I0129 12:09:15.551082 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/36e98c0d-195d-45f0-9ecb-c83ed362a56b-proxy-ca-bundles\") pod \"controller-manager-55788fcfd7-spvgx\" (UID: \"36e98c0d-195d-45f0-9ecb-c83ed362a56b\") " pod="openshift-controller-manager/controller-manager-55788fcfd7-spvgx" Jan 29 12:09:15 crc kubenswrapper[4753]: I0129 12:09:15.551143 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/36e98c0d-195d-45f0-9ecb-c83ed362a56b-serving-cert\") pod \"controller-manager-55788fcfd7-spvgx\" (UID: \"36e98c0d-195d-45f0-9ecb-c83ed362a56b\") " pod="openshift-controller-manager/controller-manager-55788fcfd7-spvgx" Jan 29 12:09:15 crc kubenswrapper[4753]: I0129 12:09:15.551183 4753 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/36e98c0d-195d-45f0-9ecb-c83ed362a56b-client-ca\") pod \"controller-manager-55788fcfd7-spvgx\" (UID: \"36e98c0d-195d-45f0-9ecb-c83ed362a56b\") " pod="openshift-controller-manager/controller-manager-55788fcfd7-spvgx" Jan 29 12:09:15 crc kubenswrapper[4753]: I0129 12:09:15.551264 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f6w4l\" (UniqueName: \"kubernetes.io/projected/36e98c0d-195d-45f0-9ecb-c83ed362a56b-kube-api-access-f6w4l\") pod \"controller-manager-55788fcfd7-spvgx\" (UID: \"36e98c0d-195d-45f0-9ecb-c83ed362a56b\") " pod="openshift-controller-manager/controller-manager-55788fcfd7-spvgx" Jan 29 12:09:15 crc kubenswrapper[4753]: I0129 12:09:15.551386 4753 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/29a1227f-368f-45d2-a827-8b9e78ed8e5d-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 12:09:15 crc kubenswrapper[4753]: I0129 12:09:15.551408 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vf7gk\" (UniqueName: \"kubernetes.io/projected/29a1227f-368f-45d2-a827-8b9e78ed8e5d-kube-api-access-vf7gk\") on node \"crc\" DevicePath \"\"" Jan 29 12:09:15 crc kubenswrapper[4753]: I0129 12:09:15.553466 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/36e98c0d-195d-45f0-9ecb-c83ed362a56b-config\") pod \"controller-manager-55788fcfd7-spvgx\" (UID: \"36e98c0d-195d-45f0-9ecb-c83ed362a56b\") " pod="openshift-controller-manager/controller-manager-55788fcfd7-spvgx" Jan 29 12:09:15 crc kubenswrapper[4753]: I0129 12:09:15.555012 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/36e98c0d-195d-45f0-9ecb-c83ed362a56b-proxy-ca-bundles\") pod \"controller-manager-55788fcfd7-spvgx\" (UID: \"36e98c0d-195d-45f0-9ecb-c83ed362a56b\") " pod="openshift-controller-manager/controller-manager-55788fcfd7-spvgx" Jan 29 12:09:15 crc kubenswrapper[4753]: I0129 12:09:15.556058 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/36e98c0d-195d-45f0-9ecb-c83ed362a56b-client-ca\") pod \"controller-manager-55788fcfd7-spvgx\" (UID: \"36e98c0d-195d-45f0-9ecb-c83ed362a56b\") " pod="openshift-controller-manager/controller-manager-55788fcfd7-spvgx" Jan 29 12:09:15 crc kubenswrapper[4753]: I0129 12:09:15.558170 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/36e98c0d-195d-45f0-9ecb-c83ed362a56b-serving-cert\") pod \"controller-manager-55788fcfd7-spvgx\" (UID: \"36e98c0d-195d-45f0-9ecb-c83ed362a56b\") " pod="openshift-controller-manager/controller-manager-55788fcfd7-spvgx" Jan 29 12:09:15 crc kubenswrapper[4753]: I0129 12:09:15.570967 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-69cc57b4b8-4x7d6"] Jan 29 12:09:15 crc kubenswrapper[4753]: I0129 12:09:15.571157 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f6w4l\" (UniqueName: \"kubernetes.io/projected/36e98c0d-195d-45f0-9ecb-c83ed362a56b-kube-api-access-f6w4l\") pod \"controller-manager-55788fcfd7-spvgx\" (UID: \"36e98c0d-195d-45f0-9ecb-c83ed362a56b\") " 
pod="openshift-controller-manager/controller-manager-55788fcfd7-spvgx" Jan 29 12:09:15 crc kubenswrapper[4753]: I0129 12:09:15.573073 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-69cc57b4b8-4x7d6"] Jan 29 12:09:15 crc kubenswrapper[4753]: I0129 12:09:15.834859 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-8694c74fd8-qgldq" Jan 29 12:09:15 crc kubenswrapper[4753]: I0129 12:09:15.856887 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-55788fcfd7-spvgx" Jan 29 12:09:15 crc kubenswrapper[4753]: I0129 12:09:15.909953 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca-config\") pod \"6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca\" (UID: \"6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca\") " Jan 29 12:09:15 crc kubenswrapper[4753]: I0129 12:09:15.982621 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca-config" (OuterVolumeSpecName: "config") pod "6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca" (UID: "6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:09:16 crc kubenswrapper[4753]: I0129 12:09:16.186170 4753 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:09:16 crc kubenswrapper[4753]: I0129 12:09:16.233412 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="29a1227f-368f-45d2-a827-8b9e78ed8e5d" path="/var/lib/kubelet/pods/29a1227f-368f-45d2-a827-8b9e78ed8e5d/volumes" Jan 29 12:09:16 crc kubenswrapper[4753]: I0129 12:09:16.287736 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca-client-ca\") pod \"6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca\" (UID: \"6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca\") " Jan 29 12:09:16 crc kubenswrapper[4753]: I0129 12:09:16.288134 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f6nfs\" (UniqueName: \"kubernetes.io/projected/6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca-kube-api-access-f6nfs\") pod \"6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca\" (UID: \"6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca\") " Jan 29 12:09:16 crc kubenswrapper[4753]: I0129 12:09:16.288207 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca-serving-cert\") pod \"6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca\" (UID: \"6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca\") " Jan 29 12:09:16 crc kubenswrapper[4753]: I0129 12:09:16.288683 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca-client-ca" (OuterVolumeSpecName: "client-ca") pod "6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca" (UID: "6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:09:16 crc kubenswrapper[4753]: I0129 12:09:16.452987 4753 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca-client-ca\") on node \"crc\" DevicePath \"\"" Jan 29 12:09:16 crc kubenswrapper[4753]: I0129 12:09:16.512451 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"8939830c-019d-4f94-8242-c2f68f0ecd40","Type":"ContainerStarted","Data":"067c816ced3fa2904361be4b5dcfcbcf7df84d34a39f650df30abcae05e04bce"} Jan 29 12:09:16 crc kubenswrapper[4753]: I0129 12:09:16.515854 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-8694c74fd8-qgldq" event={"ID":"6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca","Type":"ContainerDied","Data":"e83adfc6cc0652853c8677726661a0a6e916c9dfb9cef421b3520961b2b461af"} Jan 29 12:09:16 crc kubenswrapper[4753]: I0129 12:09:16.515912 4753 scope.go:117] "RemoveContainer" containerID="72298778b5d691c735b30123fc8e5f178bf444d3c8a3980f1d0b0843a0da37f9" Jan 29 12:09:16 crc kubenswrapper[4753]: I0129 12:09:16.515962 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-8694c74fd8-qgldq" Jan 29 12:09:16 crc kubenswrapper[4753]: I0129 12:09:16.522457 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca-kube-api-access-f6nfs" (OuterVolumeSpecName: "kube-api-access-f6nfs") pod "6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca" (UID: "6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca"). InnerVolumeSpecName "kube-api-access-f6nfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:09:16 crc kubenswrapper[4753]: I0129 12:09:16.550048 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca" (UID: "6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:09:16 crc kubenswrapper[4753]: I0129 12:09:16.553821 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f6nfs\" (UniqueName: \"kubernetes.io/projected/6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca-kube-api-access-f6nfs\") on node \"crc\" DevicePath \"\"" Jan 29 12:09:16 crc kubenswrapper[4753]: I0129 12:09:16.553966 4753 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 12:09:16 crc kubenswrapper[4753]: I0129 12:09:16.855350 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-8694c74fd8-qgldq"] Jan 29 12:09:16 crc kubenswrapper[4753]: I0129 12:09:16.859238 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-8694c74fd8-qgldq"] Jan 29 12:09:16 crc kubenswrapper[4753]: I0129 12:09:16.862123 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-55788fcfd7-spvgx"] Jan 29 12:09:16 crc kubenswrapper[4753]: W0129 12:09:16.995595 4753 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod36e98c0d_195d_45f0_9ecb_c83ed362a56b.slice/crio-e4fcaa519b12394191fe8305f1b63ba2f8fb9f7ca515fc847452761652b3119f WatchSource:0}: Error finding container e4fcaa519b12394191fe8305f1b63ba2f8fb9f7ca515fc847452761652b3119f: Status 404 returned error can't find the container with id e4fcaa519b12394191fe8305f1b63ba2f8fb9f7ca515fc847452761652b3119f Jan 29 12:09:17 crc kubenswrapper[4753]: I0129 12:09:17.587553 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-55788fcfd7-spvgx" event={"ID":"36e98c0d-195d-45f0-9ecb-c83ed362a56b","Type":"ContainerStarted","Data":"e4fcaa519b12394191fe8305f1b63ba2f8fb9f7ca515fc847452761652b3119f"} Jan 29 12:09:17 crc kubenswrapper[4753]: I0129 12:09:17.619704 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 29 12:09:17 crc kubenswrapper[4753]: E0129 12:09:17.620048 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca" containerName="route-controller-manager" Jan 29 12:09:17 crc kubenswrapper[4753]: I0129 12:09:17.620064 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca" containerName="route-controller-manager" Jan 29 12:09:17 crc kubenswrapper[4753]: I0129 12:09:17.620210 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca" containerName="route-controller-manager" Jan 29 12:09:17 crc kubenswrapper[4753]: I0129 12:09:17.620797 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 29 12:09:17 crc kubenswrapper[4753]: I0129 12:09:17.666336 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7657498d8f-596p2"] Jan 29 12:09:17 crc kubenswrapper[4753]: I0129 12:09:17.667852 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 29 12:09:17 crc kubenswrapper[4753]: I0129 12:09:17.668017 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7657498d8f-596p2"] Jan 29 12:09:17 crc kubenswrapper[4753]: I0129 12:09:17.668239 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7657498d8f-596p2" Jan 29 12:09:17 crc kubenswrapper[4753]: I0129 12:09:17.759660 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a9185c9f-17d7-4e5b-84cf-4be6d5233abd-kube-api-access\") pod \"installer-9-crc\" (UID: \"a9185c9f-17d7-4e5b-84cf-4be6d5233abd\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 29 12:09:17 crc kubenswrapper[4753]: I0129 12:09:17.760097 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a9185c9f-17d7-4e5b-84cf-4be6d5233abd-kubelet-dir\") pod \"installer-9-crc\" (UID: \"a9185c9f-17d7-4e5b-84cf-4be6d5233abd\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 29 12:09:17 crc kubenswrapper[4753]: I0129 12:09:17.760165 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/a9185c9f-17d7-4e5b-84cf-4be6d5233abd-var-lock\") pod \"installer-9-crc\" (UID: \"a9185c9f-17d7-4e5b-84cf-4be6d5233abd\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 29 12:09:17 crc kubenswrapper[4753]: I0129 12:09:17.802023 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 29 12:09:17 crc kubenswrapper[4753]: I0129 12:09:17.802145 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 29 12:09:17 crc kubenswrapper[4753]: I0129 12:09:17.802209 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 29 12:09:17 crc kubenswrapper[4753]: I0129 12:09:17.802023 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 29 12:09:17 crc kubenswrapper[4753]: I0129 12:09:17.802726 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 29 12:09:17 crc kubenswrapper[4753]: I0129 12:09:17.803422 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 29 12:09:17 crc kubenswrapper[4753]: I0129 12:09:17.920049 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a9185c9f-17d7-4e5b-84cf-4be6d5233abd-kube-api-access\") pod \"installer-9-crc\" (UID: \"a9185c9f-17d7-4e5b-84cf-4be6d5233abd\") " 
pod="openshift-kube-apiserver/installer-9-crc" Jan 29 12:09:17 crc kubenswrapper[4753]: I0129 12:09:17.920251 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p8988\" (UniqueName: \"kubernetes.io/projected/bbffd8b6-cc70-40c7-a5fa-bb36544deb3b-kube-api-access-p8988\") pod \"route-controller-manager-7657498d8f-596p2\" (UID: \"bbffd8b6-cc70-40c7-a5fa-bb36544deb3b\") " pod="openshift-route-controller-manager/route-controller-manager-7657498d8f-596p2" Jan 29 12:09:17 crc kubenswrapper[4753]: I0129 12:09:17.920354 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/bbffd8b6-cc70-40c7-a5fa-bb36544deb3b-client-ca\") pod \"route-controller-manager-7657498d8f-596p2\" (UID: \"bbffd8b6-cc70-40c7-a5fa-bb36544deb3b\") " pod="openshift-route-controller-manager/route-controller-manager-7657498d8f-596p2" Jan 29 12:09:17 crc kubenswrapper[4753]: I0129 12:09:17.920438 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bbffd8b6-cc70-40c7-a5fa-bb36544deb3b-config\") pod \"route-controller-manager-7657498d8f-596p2\" (UID: \"bbffd8b6-cc70-40c7-a5fa-bb36544deb3b\") " pod="openshift-route-controller-manager/route-controller-manager-7657498d8f-596p2" Jan 29 12:09:17 crc kubenswrapper[4753]: I0129 12:09:17.920520 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a9185c9f-17d7-4e5b-84cf-4be6d5233abd-kubelet-dir\") pod \"installer-9-crc\" (UID: \"a9185c9f-17d7-4e5b-84cf-4be6d5233abd\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 29 12:09:17 crc kubenswrapper[4753]: I0129 12:09:17.920604 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/a9185c9f-17d7-4e5b-84cf-4be6d5233abd-var-lock\") pod \"installer-9-crc\" (UID: \"a9185c9f-17d7-4e5b-84cf-4be6d5233abd\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 29 12:09:17 crc kubenswrapper[4753]: I0129 12:09:17.920689 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bbffd8b6-cc70-40c7-a5fa-bb36544deb3b-serving-cert\") pod \"route-controller-manager-7657498d8f-596p2\" (UID: \"bbffd8b6-cc70-40c7-a5fa-bb36544deb3b\") " pod="openshift-route-controller-manager/route-controller-manager-7657498d8f-596p2" Jan 29 12:09:17 crc kubenswrapper[4753]: I0129 12:09:17.921861 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a9185c9f-17d7-4e5b-84cf-4be6d5233abd-kubelet-dir\") pod \"installer-9-crc\" (UID: \"a9185c9f-17d7-4e5b-84cf-4be6d5233abd\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 29 12:09:17 crc kubenswrapper[4753]: I0129 12:09:17.921948 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/a9185c9f-17d7-4e5b-84cf-4be6d5233abd-var-lock\") pod \"installer-9-crc\" (UID: \"a9185c9f-17d7-4e5b-84cf-4be6d5233abd\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 29 12:09:18 crc kubenswrapper[4753]: I0129 12:09:17.964568 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: 
\"kubernetes.io/projected/a9185c9f-17d7-4e5b-84cf-4be6d5233abd-kube-api-access\") pod \"installer-9-crc\" (UID: \"a9185c9f-17d7-4e5b-84cf-4be6d5233abd\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 29 12:09:18 crc kubenswrapper[4753]: I0129 12:09:17.964902 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca" path="/var/lib/kubelet/pods/6ab7c0ec-f1e4-42ec-9b2f-21371d7129ca/volumes" Jan 29 12:09:18 crc kubenswrapper[4753]: I0129 12:09:18.024144 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p8988\" (UniqueName: \"kubernetes.io/projected/bbffd8b6-cc70-40c7-a5fa-bb36544deb3b-kube-api-access-p8988\") pod \"route-controller-manager-7657498d8f-596p2\" (UID: \"bbffd8b6-cc70-40c7-a5fa-bb36544deb3b\") " pod="openshift-route-controller-manager/route-controller-manager-7657498d8f-596p2" Jan 29 12:09:18 crc kubenswrapper[4753]: I0129 12:09:18.024208 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/bbffd8b6-cc70-40c7-a5fa-bb36544deb3b-client-ca\") pod \"route-controller-manager-7657498d8f-596p2\" (UID: \"bbffd8b6-cc70-40c7-a5fa-bb36544deb3b\") " pod="openshift-route-controller-manager/route-controller-manager-7657498d8f-596p2" Jan 29 12:09:18 crc kubenswrapper[4753]: I0129 12:09:18.024407 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bbffd8b6-cc70-40c7-a5fa-bb36544deb3b-config\") pod \"route-controller-manager-7657498d8f-596p2\" (UID: \"bbffd8b6-cc70-40c7-a5fa-bb36544deb3b\") " pod="openshift-route-controller-manager/route-controller-manager-7657498d8f-596p2" Jan 29 12:09:18 crc kubenswrapper[4753]: I0129 12:09:18.024566 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bbffd8b6-cc70-40c7-a5fa-bb36544deb3b-serving-cert\") pod \"route-controller-manager-7657498d8f-596p2\" (UID: \"bbffd8b6-cc70-40c7-a5fa-bb36544deb3b\") " pod="openshift-route-controller-manager/route-controller-manager-7657498d8f-596p2" Jan 29 12:09:18 crc kubenswrapper[4753]: I0129 12:09:18.026144 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/bbffd8b6-cc70-40c7-a5fa-bb36544deb3b-client-ca\") pod \"route-controller-manager-7657498d8f-596p2\" (UID: \"bbffd8b6-cc70-40c7-a5fa-bb36544deb3b\") " pod="openshift-route-controller-manager/route-controller-manager-7657498d8f-596p2" Jan 29 12:09:18 crc kubenswrapper[4753]: I0129 12:09:18.026244 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bbffd8b6-cc70-40c7-a5fa-bb36544deb3b-config\") pod \"route-controller-manager-7657498d8f-596p2\" (UID: \"bbffd8b6-cc70-40c7-a5fa-bb36544deb3b\") " pod="openshift-route-controller-manager/route-controller-manager-7657498d8f-596p2" Jan 29 12:09:18 crc kubenswrapper[4753]: I0129 12:09:18.036679 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bbffd8b6-cc70-40c7-a5fa-bb36544deb3b-serving-cert\") pod \"route-controller-manager-7657498d8f-596p2\" (UID: \"bbffd8b6-cc70-40c7-a5fa-bb36544deb3b\") " pod="openshift-route-controller-manager/route-controller-manager-7657498d8f-596p2" Jan 29 12:09:18 crc kubenswrapper[4753]: I0129 12:09:18.060991 4753 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-p8988\" (UniqueName: \"kubernetes.io/projected/bbffd8b6-cc70-40c7-a5fa-bb36544deb3b-kube-api-access-p8988\") pod \"route-controller-manager-7657498d8f-596p2\" (UID: \"bbffd8b6-cc70-40c7-a5fa-bb36544deb3b\") " pod="openshift-route-controller-manager/route-controller-manager-7657498d8f-596p2" Jan 29 12:09:18 crc kubenswrapper[4753]: I0129 12:09:18.359660 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 29 12:09:18 crc kubenswrapper[4753]: I0129 12:09:18.482965 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7657498d8f-596p2" Jan 29 12:09:18 crc kubenswrapper[4753]: I0129 12:09:18.508544 4753 patch_prober.go:28] interesting pod/downloads-7954f5f757-b2rb9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" start-of-body= Jan 29 12:09:18 crc kubenswrapper[4753]: I0129 12:09:18.508648 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-b2rb9" podUID="200ffbfe-dff4-45e2-944d-34a3ad56f018" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" Jan 29 12:09:18 crc kubenswrapper[4753]: I0129 12:09:18.509082 4753 patch_prober.go:28] interesting pod/downloads-7954f5f757-b2rb9 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" start-of-body= Jan 29 12:09:18 crc kubenswrapper[4753]: I0129 12:09:18.509109 4753 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-b2rb9" podUID="200ffbfe-dff4-45e2-944d-34a3ad56f018" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" Jan 29 12:09:18 crc kubenswrapper[4753]: I0129 12:09:18.520847 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-mql86" Jan 29 12:09:18 crc kubenswrapper[4753]: I0129 12:09:18.527944 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-mql86" Jan 29 12:09:18 crc kubenswrapper[4753]: I0129 12:09:18.752359 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"8939830c-019d-4f94-8242-c2f68f0ecd40","Type":"ContainerStarted","Data":"6f05144a6e35f280d91a55c683d055c65120e9a81b7e13101dcb6a0d52fe3633"} Jan 29 12:09:18 crc kubenswrapper[4753]: I0129 12:09:18.909337 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-9-crc" podStartSLOduration=6.909284949 podStartE2EDuration="6.909284949s" podCreationTimestamp="2026-01-29 12:09:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:09:18.907826946 +0000 UTC m=+173.159908401" watchObservedRunningTime="2026-01-29 12:09:18.909284949 +0000 UTC m=+173.161366404" Jan 29 12:09:20 crc kubenswrapper[4753]: I0129 12:09:20.093573 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-kube-apiserver/installer-9-crc"] Jan 29 12:09:20 crc kubenswrapper[4753]: I0129 12:09:20.102446 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-55788fcfd7-spvgx" event={"ID":"36e98c0d-195d-45f0-9ecb-c83ed362a56b","Type":"ContainerStarted","Data":"de5712b884dcb2dba2784a29f8a07a21e33ce657aa22ddcef34f8a79439d3703"} Jan 29 12:09:20 crc kubenswrapper[4753]: I0129 12:09:20.102871 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-55788fcfd7-spvgx" Jan 29 12:09:20 crc kubenswrapper[4753]: I0129 12:09:20.116035 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-55788fcfd7-spvgx" Jan 29 12:09:20 crc kubenswrapper[4753]: I0129 12:09:20.152168 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-55788fcfd7-spvgx" podStartSLOduration=8.152140819 podStartE2EDuration="8.152140819s" podCreationTimestamp="2026-01-29 12:09:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:09:20.139412482 +0000 UTC m=+174.391493937" watchObservedRunningTime="2026-01-29 12:09:20.152140819 +0000 UTC m=+174.404222274" Jan 29 12:09:20 crc kubenswrapper[4753]: I0129 12:09:20.442377 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7657498d8f-596p2"] Jan 29 12:09:21 crc kubenswrapper[4753]: I0129 12:09:21.131050 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"a9185c9f-17d7-4e5b-84cf-4be6d5233abd","Type":"ContainerStarted","Data":"b5b6dae4293eb08245393b4c968be2ebcdf0c35a4016f77610d86864fdc5ceba"} Jan 29 12:09:22 crc kubenswrapper[4753]: I0129 12:09:22.146128 4753 generic.go:334] "Generic (PLEG): container finished" podID="8939830c-019d-4f94-8242-c2f68f0ecd40" containerID="6f05144a6e35f280d91a55c683d055c65120e9a81b7e13101dcb6a0d52fe3633" exitCode=0 Jan 29 12:09:22 crc kubenswrapper[4753]: I0129 12:09:22.146515 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"8939830c-019d-4f94-8242-c2f68f0ecd40","Type":"ContainerDied","Data":"6f05144a6e35f280d91a55c683d055c65120e9a81b7e13101dcb6a0d52fe3633"} Jan 29 12:09:23 crc kubenswrapper[4753]: W0129 12:09:23.388256 4753 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbbffd8b6_cc70_40c7_a5fa_bb36544deb3b.slice/crio-e4cf0c4649426f8c624c6b6f21578fb67b1411974c8e0521f22c3f33aba2c114 WatchSource:0}: Error finding container e4cf0c4649426f8c624c6b6f21578fb67b1411974c8e0521f22c3f33aba2c114: Status 404 returned error can't find the container with id e4cf0c4649426f8c624c6b6f21578fb67b1411974c8e0521f22c3f33aba2c114 Jan 29 12:09:24 crc kubenswrapper[4753]: I0129 12:09:24.994201 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7657498d8f-596p2" event={"ID":"bbffd8b6-cc70-40c7-a5fa-bb36544deb3b","Type":"ContainerStarted","Data":"e4cf0c4649426f8c624c6b6f21578fb67b1411974c8e0521f22c3f33aba2c114"} Jan 29 12:09:26 crc kubenswrapper[4753]: I0129 12:09:26.399476 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-route-controller-manager/route-controller-manager-7657498d8f-596p2" event={"ID":"bbffd8b6-cc70-40c7-a5fa-bb36544deb3b","Type":"ContainerStarted","Data":"789f686350a37c412441d5d553653c635a6b80fed4f60fc3433c350500e695c9"} Jan 29 12:09:26 crc kubenswrapper[4753]: I0129 12:09:26.435343 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-7657498d8f-596p2" podStartSLOduration=14.435196039 podStartE2EDuration="14.435196039s" podCreationTimestamp="2026-01-29 12:09:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:09:26.426998086 +0000 UTC m=+180.679079551" watchObservedRunningTime="2026-01-29 12:09:26.435196039 +0000 UTC m=+180.687277494" Jan 29 12:09:26 crc kubenswrapper[4753]: I0129 12:09:26.617682 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 29 12:09:27 crc kubenswrapper[4753]: I0129 12:09:27.376511 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/8939830c-019d-4f94-8242-c2f68f0ecd40-kube-api-access\") pod \"8939830c-019d-4f94-8242-c2f68f0ecd40\" (UID: \"8939830c-019d-4f94-8242-c2f68f0ecd40\") " Jan 29 12:09:27 crc kubenswrapper[4753]: I0129 12:09:27.380908 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8939830c-019d-4f94-8242-c2f68f0ecd40-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "8939830c-019d-4f94-8242-c2f68f0ecd40" (UID: "8939830c-019d-4f94-8242-c2f68f0ecd40"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 12:09:27 crc kubenswrapper[4753]: I0129 12:09:27.376652 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/8939830c-019d-4f94-8242-c2f68f0ecd40-kubelet-dir\") pod \"8939830c-019d-4f94-8242-c2f68f0ecd40\" (UID: \"8939830c-019d-4f94-8242-c2f68f0ecd40\") " Jan 29 12:09:27 crc kubenswrapper[4753]: I0129 12:09:27.384952 4753 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/8939830c-019d-4f94-8242-c2f68f0ecd40-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 29 12:09:27 crc kubenswrapper[4753]: I0129 12:09:27.392947 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8939830c-019d-4f94-8242-c2f68f0ecd40-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "8939830c-019d-4f94-8242-c2f68f0ecd40" (UID: "8939830c-019d-4f94-8242-c2f68f0ecd40"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:09:27 crc kubenswrapper[4753]: I0129 12:09:27.444886 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"8939830c-019d-4f94-8242-c2f68f0ecd40","Type":"ContainerDied","Data":"067c816ced3fa2904361be4b5dcfcbcf7df84d34a39f650df30abcae05e04bce"} Jan 29 12:09:27 crc kubenswrapper[4753]: I0129 12:09:27.445013 4753 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="067c816ced3fa2904361be4b5dcfcbcf7df84d34a39f650df30abcae05e04bce" Jan 29 12:09:27 crc kubenswrapper[4753]: I0129 12:09:27.445190 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 29 12:09:27 crc kubenswrapper[4753]: I0129 12:09:27.465285 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"a9185c9f-17d7-4e5b-84cf-4be6d5233abd","Type":"ContainerStarted","Data":"e13cfeb25210f87c33254c71095be483cac65fcc7f6298cc62c23bd62fad8081"} Jan 29 12:09:27 crc kubenswrapper[4753]: I0129 12:09:27.465358 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-7657498d8f-596p2" Jan 29 12:09:27 crc kubenswrapper[4753]: I0129 12:09:27.473345 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-7657498d8f-596p2" Jan 29 12:09:28 crc kubenswrapper[4753]: I0129 12:09:28.012391 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/8939830c-019d-4f94-8242-c2f68f0ecd40-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 29 12:09:28 crc kubenswrapper[4753]: I0129 12:09:28.051856 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=11.051832397 podStartE2EDuration="11.051832397s" podCreationTimestamp="2026-01-29 12:09:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:09:28.035470464 +0000 UTC m=+182.287551929" watchObservedRunningTime="2026-01-29 12:09:28.051832397 +0000 UTC m=+182.303913852" Jan 29 12:09:28 crc kubenswrapper[4753]: I0129 12:09:28.531357 4753 patch_prober.go:28] interesting pod/downloads-7954f5f757-b2rb9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" start-of-body= Jan 29 12:09:28 crc kubenswrapper[4753]: I0129 12:09:28.532249 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-b2rb9" podUID="200ffbfe-dff4-45e2-944d-34a3ad56f018" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" Jan 29 12:09:28 crc kubenswrapper[4753]: I0129 12:09:28.533797 4753 patch_prober.go:28] interesting pod/downloads-7954f5f757-b2rb9 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" start-of-body= Jan 29 12:09:28 crc kubenswrapper[4753]: I0129 12:09:28.533863 4753 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-b2rb9" podUID="200ffbfe-dff4-45e2-944d-34a3ad56f018" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" Jan 29 12:09:29 crc kubenswrapper[4753]: I0129 12:09:29.252914 4753 patch_prober.go:28] interesting pod/machine-config-daemon-7c24x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 12:09:29 crc kubenswrapper[4753]: I0129 12:09:29.253011 4753 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 12:09:38 crc kubenswrapper[4753]: I0129 12:09:38.506731 4753 patch_prober.go:28] interesting pod/downloads-7954f5f757-b2rb9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" start-of-body= Jan 29 12:09:38 crc kubenswrapper[4753]: I0129 12:09:38.507402 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-b2rb9" podUID="200ffbfe-dff4-45e2-944d-34a3ad56f018" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" Jan 29 12:09:38 crc kubenswrapper[4753]: I0129 12:09:38.506809 4753 patch_prober.go:28] interesting pod/downloads-7954f5f757-b2rb9 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" start-of-body= Jan 29 12:09:38 crc kubenswrapper[4753]: I0129 12:09:38.507512 4753 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-b2rb9" podUID="200ffbfe-dff4-45e2-944d-34a3ad56f018" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" Jan 29 12:09:38 crc kubenswrapper[4753]: I0129 12:09:38.507608 4753 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-console/downloads-7954f5f757-b2rb9" Jan 29 12:09:38 crc kubenswrapper[4753]: I0129 12:09:38.508632 4753 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="download-server" containerStatusID={"Type":"cri-o","ID":"ef0121188c254337af0c007914b35d5b2619129cddc2287bb20ea16c8b65b6a3"} pod="openshift-console/downloads-7954f5f757-b2rb9" containerMessage="Container download-server failed liveness probe, will be restarted" Jan 29 12:09:38 crc kubenswrapper[4753]: I0129 12:09:38.508712 4753 patch_prober.go:28] interesting pod/downloads-7954f5f757-b2rb9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" start-of-body= Jan 29 12:09:38 crc kubenswrapper[4753]: I0129 12:09:38.508739 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/downloads-7954f5f757-b2rb9" podUID="200ffbfe-dff4-45e2-944d-34a3ad56f018" containerName="download-server" containerID="cri-o://ef0121188c254337af0c007914b35d5b2619129cddc2287bb20ea16c8b65b6a3" gracePeriod=2 Jan 29 12:09:38 crc kubenswrapper[4753]: I0129 12:09:38.508743 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-b2rb9" podUID="200ffbfe-dff4-45e2-944d-34a3ad56f018" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" Jan 29 12:09:40 crc kubenswrapper[4753]: I0129 12:09:39.809962 4753 generic.go:334] "Generic (PLEG): container finished" podID="200ffbfe-dff4-45e2-944d-34a3ad56f018" 
containerID="ef0121188c254337af0c007914b35d5b2619129cddc2287bb20ea16c8b65b6a3" exitCode=0 Jan 29 12:09:40 crc kubenswrapper[4753]: I0129 12:09:39.809980 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-b2rb9" event={"ID":"200ffbfe-dff4-45e2-944d-34a3ad56f018","Type":"ContainerDied","Data":"ef0121188c254337af0c007914b35d5b2619129cddc2287bb20ea16c8b65b6a3"} Jan 29 12:09:40 crc kubenswrapper[4753]: I0129 12:09:39.810525 4753 scope.go:117] "RemoveContainer" containerID="57e3502968cdee1c7892834df8e019409c473c88421470a379ea0d888bc9c4dc" Jan 29 12:09:46 crc kubenswrapper[4753]: I0129 12:09:46.145919 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 12:09:48 crc kubenswrapper[4753]: I0129 12:09:48.542813 4753 patch_prober.go:28] interesting pod/downloads-7954f5f757-b2rb9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" start-of-body= Jan 29 12:09:48 crc kubenswrapper[4753]: I0129 12:09:48.543200 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-b2rb9" podUID="200ffbfe-dff4-45e2-944d-34a3ad56f018" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" Jan 29 12:09:58 crc kubenswrapper[4753]: I0129 12:09:58.651947 4753 patch_prober.go:28] interesting pod/downloads-7954f5f757-b2rb9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" start-of-body= Jan 29 12:09:58 crc kubenswrapper[4753]: I0129 12:09:58.653312 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-b2rb9" podUID="200ffbfe-dff4-45e2-944d-34a3ad56f018" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" Jan 29 12:09:59 crc kubenswrapper[4753]: I0129 12:09:59.253281 4753 patch_prober.go:28] interesting pod/machine-config-daemon-7c24x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 12:09:59 crc kubenswrapper[4753]: I0129 12:09:59.253480 4753 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 12:10:08 crc kubenswrapper[4753]: I0129 12:10:08.507498 4753 patch_prober.go:28] interesting pod/downloads-7954f5f757-b2rb9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" start-of-body= Jan 29 12:10:08 crc kubenswrapper[4753]: I0129 12:10:08.508255 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-b2rb9" podUID="200ffbfe-dff4-45e2-944d-34a3ad56f018" containerName="download-server" probeResult="failure" output="Get 
\"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" Jan 29 12:10:09 crc kubenswrapper[4753]: I0129 12:10:09.686958 4753 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 29 12:10:09 crc kubenswrapper[4753]: E0129 12:10:09.687807 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8939830c-019d-4f94-8242-c2f68f0ecd40" containerName="pruner" Jan 29 12:10:09 crc kubenswrapper[4753]: I0129 12:10:09.687846 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="8939830c-019d-4f94-8242-c2f68f0ecd40" containerName="pruner" Jan 29 12:10:09 crc kubenswrapper[4753]: I0129 12:10:09.688138 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="8939830c-019d-4f94-8242-c2f68f0ecd40" containerName="pruner" Jan 29 12:10:09 crc kubenswrapper[4753]: I0129 12:10:09.688864 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 12:10:09 crc kubenswrapper[4753]: I0129 12:10:09.736490 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 29 12:10:09 crc kubenswrapper[4753]: I0129 12:10:09.779612 4753 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 29 12:10:09 crc kubenswrapper[4753]: I0129 12:10:09.780116 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://cab3ab80a8fddbc591cd07fdbed800f525bb8a1b5505bf818ebb59cbcce73003" gracePeriod=15 Jan 29 12:10:09 crc kubenswrapper[4753]: I0129 12:10:09.780252 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://e86597c91848f265cd70410d57ac60eadba1d0322b9b897097366a41c93d8134" gracePeriod=15 Jan 29 12:10:09 crc kubenswrapper[4753]: I0129 12:10:09.780301 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://91d57a9cede640bdcec325175598b72dd00398436b730248728c7d9afd545b1e" gracePeriod=15 Jan 29 12:10:09 crc kubenswrapper[4753]: I0129 12:10:09.780344 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://7e02d977e34888952623f4ced5e073d7bf1ff360f8deb435dc577d2ecf146cb2" gracePeriod=15 Jan 29 12:10:09 crc kubenswrapper[4753]: I0129 12:10:09.780285 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://7af8be0836a8ff7d08ff48d9487acdedd4b83127538d071d13b3215400c0f8de" gracePeriod=15 Jan 29 12:10:09 crc kubenswrapper[4753]: I0129 12:10:09.783733 4753 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 29 12:10:09 crc kubenswrapper[4753]: E0129 12:10:09.784121 4753 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Jan 29 12:10:09 crc kubenswrapper[4753]: I0129 12:10:09.784138 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Jan 29 12:10:09 crc kubenswrapper[4753]: E0129 12:10:09.784149 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Jan 29 12:10:09 crc kubenswrapper[4753]: I0129 12:10:09.784156 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Jan 29 12:10:09 crc kubenswrapper[4753]: E0129 12:10:09.784166 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 29 12:10:09 crc kubenswrapper[4753]: I0129 12:10:09.784173 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 29 12:10:09 crc kubenswrapper[4753]: E0129 12:10:09.784183 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 29 12:10:09 crc kubenswrapper[4753]: I0129 12:10:09.784190 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 29 12:10:09 crc kubenswrapper[4753]: E0129 12:10:09.784203 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Jan 29 12:10:09 crc kubenswrapper[4753]: I0129 12:10:09.784213 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Jan 29 12:10:09 crc kubenswrapper[4753]: E0129 12:10:09.784247 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 29 12:10:09 crc kubenswrapper[4753]: I0129 12:10:09.784255 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 29 12:10:09 crc kubenswrapper[4753]: E0129 12:10:09.784264 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 29 12:10:09 crc kubenswrapper[4753]: I0129 12:10:09.784272 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 29 12:10:09 crc kubenswrapper[4753]: I0129 12:10:09.784401 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 29 12:10:09 crc kubenswrapper[4753]: I0129 12:10:09.784419 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 29 12:10:09 crc kubenswrapper[4753]: I0129 12:10:09.784431 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 29 12:10:09 crc kubenswrapper[4753]: I0129 12:10:09.784442 4753 memory_manager.go:354] "RemoveStaleState removing 
state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 29 12:10:09 crc kubenswrapper[4753]: I0129 12:10:09.784452 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 29 12:10:09 crc kubenswrapper[4753]: I0129 12:10:09.784462 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Jan 29 12:10:09 crc kubenswrapper[4753]: I0129 12:10:09.784471 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Jan 29 12:10:09 crc kubenswrapper[4753]: E0129 12:10:09.784626 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 29 12:10:09 crc kubenswrapper[4753]: I0129 12:10:09.784638 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 29 12:10:09 crc kubenswrapper[4753]: I0129 12:10:09.851436 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 12:10:09 crc kubenswrapper[4753]: I0129 12:10:09.851515 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 12:10:09 crc kubenswrapper[4753]: I0129 12:10:09.851547 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 12:10:09 crc kubenswrapper[4753]: I0129 12:10:09.851582 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 12:10:09 crc kubenswrapper[4753]: I0129 12:10:09.851611 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 12:10:09 crc kubenswrapper[4753]: I0129 12:10:09.851631 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " 
pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 12:10:09 crc kubenswrapper[4753]: I0129 12:10:09.851666 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 12:10:09 crc kubenswrapper[4753]: I0129 12:10:09.851697 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 12:10:09 crc kubenswrapper[4753]: I0129 12:10:09.952906 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 12:10:09 crc kubenswrapper[4753]: I0129 12:10:09.953020 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 12:10:09 crc kubenswrapper[4753]: I0129 12:10:09.953111 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 12:10:09 crc kubenswrapper[4753]: I0129 12:10:09.953168 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 12:10:09 crc kubenswrapper[4753]: I0129 12:10:09.953472 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 12:10:09 crc kubenswrapper[4753]: I0129 12:10:09.953550 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 12:10:09 crc kubenswrapper[4753]: I0129 12:10:09.953603 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod 
\"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 12:10:09 crc kubenswrapper[4753]: I0129 12:10:09.953644 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 12:10:09 crc kubenswrapper[4753]: I0129 12:10:09.953681 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 12:10:09 crc kubenswrapper[4753]: I0129 12:10:09.953714 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 12:10:09 crc kubenswrapper[4753]: I0129 12:10:09.953780 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 12:10:09 crc kubenswrapper[4753]: I0129 12:10:09.953827 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 12:10:09 crc kubenswrapper[4753]: I0129 12:10:09.953849 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 12:10:09 crc kubenswrapper[4753]: I0129 12:10:09.954312 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 12:10:09 crc kubenswrapper[4753]: I0129 12:10:09.954551 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 12:10:09 crc kubenswrapper[4753]: I0129 12:10:09.954575 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 12:10:10 crc kubenswrapper[4753]: 
I0129 12:10:10.032592 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 12:10:10 crc kubenswrapper[4753]: I0129 12:10:10.325900 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Jan 29 12:10:10 crc kubenswrapper[4753]: I0129 12:10:10.327952 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 29 12:10:10 crc kubenswrapper[4753]: I0129 12:10:10.328789 4753 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="91d57a9cede640bdcec325175598b72dd00398436b730248728c7d9afd545b1e" exitCode=2 Jan 29 12:10:11 crc kubenswrapper[4753]: I0129 12:10:11.341056 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Jan 29 12:10:11 crc kubenswrapper[4753]: I0129 12:10:11.342847 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 29 12:10:11 crc kubenswrapper[4753]: I0129 12:10:11.344184 4753 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="e86597c91848f265cd70410d57ac60eadba1d0322b9b897097366a41c93d8134" exitCode=0 Jan 29 12:10:11 crc kubenswrapper[4753]: I0129 12:10:11.344244 4753 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="7e02d977e34888952623f4ced5e073d7bf1ff360f8deb435dc577d2ecf146cb2" exitCode=0 Jan 29 12:10:11 crc kubenswrapper[4753]: I0129 12:10:11.344258 4753 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="7af8be0836a8ff7d08ff48d9487acdedd4b83127538d071d13b3215400c0f8de" exitCode=0 Jan 29 12:10:11 crc kubenswrapper[4753]: I0129 12:10:11.347450 4753 generic.go:334] "Generic (PLEG): container finished" podID="a9185c9f-17d7-4e5b-84cf-4be6d5233abd" containerID="e13cfeb25210f87c33254c71095be483cac65fcc7f6298cc62c23bd62fad8081" exitCode=0 Jan 29 12:10:11 crc kubenswrapper[4753]: I0129 12:10:11.347500 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"a9185c9f-17d7-4e5b-84cf-4be6d5233abd","Type":"ContainerDied","Data":"e13cfeb25210f87c33254c71095be483cac65fcc7f6298cc62c23bd62fad8081"} Jan 29 12:10:11 crc kubenswrapper[4753]: I0129 12:10:11.350376 4753 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:11 crc kubenswrapper[4753]: I0129 12:10:11.351156 4753 status_manager.go:851] "Failed to get status for pod" podUID="a9185c9f-17d7-4e5b-84cf-4be6d5233abd" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:12 crc kubenswrapper[4753]: E0129 
12:10:12.312828 4753 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Jan 29 12:10:12 crc kubenswrapper[4753]: E0129 12:10:12.314159 4753 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-jqg9m,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-7dkr6_openshift-marketplace(318aa5db-6b19-4efe-8c5d-00fbb4a84b13): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 29 12:10:12 crc kubenswrapper[4753]: E0129 12:10:12.315410 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-7dkr6" podUID="318aa5db-6b19-4efe-8c5d-00fbb4a84b13" Jan 29 12:10:12 crc kubenswrapper[4753]: E0129 12:10:12.315723 4753 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/events\": dial tcp 38.129.56.211:6443: connect: connection refused" event="&Event{ObjectMeta:{redhat-marketplace-7dkr6.188f326aac04b32e openshift-marketplace 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-marketplace,Name:redhat-marketplace-7dkr6,UID:318aa5db-6b19-4efe-8c5d-00fbb4a84b13,APIVersion:v1,ResourceVersion:28071,FieldPath:spec.initContainers{extract-content},},Reason:Failed,Message:Failed to pull image \"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\": rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-29 12:10:12.313805614 +0000 UTC 
m=+226.565887069,LastTimestamp:2026-01-29 12:10:12.313805614 +0000 UTC m=+226.565887069,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 29 12:10:12 crc kubenswrapper[4753]: E0129 12:10:12.341531 4753 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Jan 29 12:10:12 crc kubenswrapper[4753]: E0129 12:10:12.342208 4753 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-2bhr9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-b52ds_openshift-marketplace(d818fc13-9863-4172-a818-4e01af393842): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 29 12:10:12 crc kubenswrapper[4753]: E0129 12:10:12.343694 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-b52ds" podUID="d818fc13-9863-4172-a818-4e01af393842" Jan 29 12:10:12 crc kubenswrapper[4753]: I0129 12:10:12.361101 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Jan 29 12:10:12 crc kubenswrapper[4753]: I0129 12:10:12.362869 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 29 12:10:12 crc kubenswrapper[4753]: I0129 12:10:12.363730 4753 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" 
containerID="cab3ab80a8fddbc591cd07fdbed800f525bb8a1b5505bf818ebb59cbcce73003" exitCode=0 Jan 29 12:10:12 crc kubenswrapper[4753]: I0129 12:10:12.368277 4753 status_manager.go:851] "Failed to get status for pod" podUID="a9185c9f-17d7-4e5b-84cf-4be6d5233abd" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:12 crc kubenswrapper[4753]: I0129 12:10:12.368795 4753 status_manager.go:851] "Failed to get status for pod" podUID="318aa5db-6b19-4efe-8c5d-00fbb4a84b13" pod="openshift-marketplace/redhat-marketplace-7dkr6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-7dkr6\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:12 crc kubenswrapper[4753]: I0129 12:10:12.369396 4753 status_manager.go:851] "Failed to get status for pod" podUID="d818fc13-9863-4172-a818-4e01af393842" pod="openshift-marketplace/redhat-marketplace-b52ds" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-b52ds\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:12 crc kubenswrapper[4753]: I0129 12:10:12.370669 4753 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:12 crc kubenswrapper[4753]: I0129 12:10:12.371656 4753 status_manager.go:851] "Failed to get status for pod" podUID="d818fc13-9863-4172-a818-4e01af393842" pod="openshift-marketplace/redhat-marketplace-b52ds" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-b52ds\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:12 crc kubenswrapper[4753]: I0129 12:10:12.372253 4753 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:12 crc kubenswrapper[4753]: I0129 12:10:12.372867 4753 status_manager.go:851] "Failed to get status for pod" podUID="318aa5db-6b19-4efe-8c5d-00fbb4a84b13" pod="openshift-marketplace/redhat-marketplace-7dkr6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-7dkr6\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:12 crc kubenswrapper[4753]: I0129 12:10:12.373514 4753 status_manager.go:851] "Failed to get status for pod" podUID="a9185c9f-17d7-4e5b-84cf-4be6d5233abd" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:14 crc kubenswrapper[4753]: E0129 12:10:14.300745 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image 
\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-b52ds" podUID="d818fc13-9863-4172-a818-4e01af393842" Jan 29 12:10:14 crc kubenswrapper[4753]: E0129 12:10:14.300991 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-7dkr6" podUID="318aa5db-6b19-4efe-8c5d-00fbb4a84b13" Jan 29 12:10:14 crc kubenswrapper[4753]: E0129 12:10:14.367347 4753 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Jan 29 12:10:14 crc kubenswrapper[4753]: E0129 12:10:14.367648 4753 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-pbc79,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-pcxth_openshift-marketplace(225f75d2-06ff-4a8e-ad48-5fb73aba9a5f): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 29 12:10:14 crc kubenswrapper[4753]: E0129 12:10:14.368911 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-pcxth" podUID="225f75d2-06ff-4a8e-ad48-5fb73aba9a5f" Jan 29 12:10:14 crc kubenswrapper[4753]: I0129 12:10:14.379977 4753 status_manager.go:851] "Failed to get status for pod" podUID="d818fc13-9863-4172-a818-4e01af393842" pod="openshift-marketplace/redhat-marketplace-b52ds" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-b52ds\": dial tcp 38.129.56.211:6443: 
connect: connection refused" Jan 29 12:10:14 crc kubenswrapper[4753]: I0129 12:10:14.380607 4753 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:14 crc kubenswrapper[4753]: I0129 12:10:14.381137 4753 status_manager.go:851] "Failed to get status for pod" podUID="225f75d2-06ff-4a8e-ad48-5fb73aba9a5f" pod="openshift-marketplace/certified-operators-pcxth" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-pcxth\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:14 crc kubenswrapper[4753]: I0129 12:10:14.381440 4753 status_manager.go:851] "Failed to get status for pod" podUID="a9185c9f-17d7-4e5b-84cf-4be6d5233abd" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:14 crc kubenswrapper[4753]: I0129 12:10:14.381670 4753 status_manager.go:851] "Failed to get status for pod" podUID="318aa5db-6b19-4efe-8c5d-00fbb4a84b13" pod="openshift-marketplace/redhat-marketplace-7dkr6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-7dkr6\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:16 crc kubenswrapper[4753]: E0129 12:10:16.887397 4753 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:16 crc kubenswrapper[4753]: E0129 12:10:16.888328 4753 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:16 crc kubenswrapper[4753]: E0129 12:10:16.888713 4753 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:16 crc kubenswrapper[4753]: E0129 12:10:16.889331 4753 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:16 crc kubenswrapper[4753]: E0129 12:10:16.889584 4753 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:16 crc kubenswrapper[4753]: I0129 12:10:16.889628 4753 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease" Jan 29 12:10:16 crc kubenswrapper[4753]: E0129 12:10:16.889910 4753 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": 
dial tcp 38.129.56.211:6443: connect: connection refused" interval="200ms" Jan 29 12:10:17 crc kubenswrapper[4753]: E0129 12:10:17.091188 4753 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.211:6443: connect: connection refused" interval="400ms" Jan 29 12:10:17 crc kubenswrapper[4753]: E0129 12:10:17.492637 4753 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.211:6443: connect: connection refused" interval="800ms" Jan 29 12:10:17 crc kubenswrapper[4753]: I0129 12:10:17.897325 4753 status_manager.go:851] "Failed to get status for pod" podUID="d818fc13-9863-4172-a818-4e01af393842" pod="openshift-marketplace/redhat-marketplace-b52ds" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-b52ds\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:17 crc kubenswrapper[4753]: I0129 12:10:17.897851 4753 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:17 crc kubenswrapper[4753]: I0129 12:10:17.898175 4753 status_manager.go:851] "Failed to get status for pod" podUID="225f75d2-06ff-4a8e-ad48-5fb73aba9a5f" pod="openshift-marketplace/certified-operators-pcxth" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-pcxth\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:17 crc kubenswrapper[4753]: I0129 12:10:17.898609 4753 status_manager.go:851] "Failed to get status for pod" podUID="a9185c9f-17d7-4e5b-84cf-4be6d5233abd" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:17 crc kubenswrapper[4753]: I0129 12:10:17.898880 4753 status_manager.go:851] "Failed to get status for pod" podUID="318aa5db-6b19-4efe-8c5d-00fbb4a84b13" pod="openshift-marketplace/redhat-marketplace-7dkr6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-7dkr6\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:18 crc kubenswrapper[4753]: E0129 12:10:18.293555 4753 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.211:6443: connect: connection refused" interval="1.6s" Jan 29 12:10:18 crc kubenswrapper[4753]: I0129 12:10:18.507276 4753 patch_prober.go:28] interesting pod/downloads-7954f5f757-b2rb9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" start-of-body= Jan 29 12:10:18 crc kubenswrapper[4753]: I0129 12:10:18.508384 4753 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openshift-console/downloads-7954f5f757-b2rb9" podUID="200ffbfe-dff4-45e2-944d-34a3ad56f018" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" Jan 29 12:10:18 crc kubenswrapper[4753]: E0129 12:10:18.664147 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-pcxth" podUID="225f75d2-06ff-4a8e-ad48-5fb73aba9a5f" Jan 29 12:10:18 crc kubenswrapper[4753]: E0129 12:10:18.831338 4753 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Jan 29 12:10:18 crc kubenswrapper[4753]: E0129 12:10:18.831901 4753 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-62gc5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-5clxb_openshift-marketplace(574faad5-0a82-4ff3-b0a8-5390bfd3dc27): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 29 12:10:18 crc kubenswrapper[4753]: E0129 12:10:18.833654 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-5clxb" podUID="574faad5-0a82-4ff3-b0a8-5390bfd3dc27" Jan 29 12:10:18 crc kubenswrapper[4753]: E0129 12:10:18.834065 4753 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" 
image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Jan 29 12:10:18 crc kubenswrapper[4753]: E0129 12:10:18.834329 4753 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-t7r8k,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-dgs2s_openshift-marketplace(13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 29 12:10:18 crc kubenswrapper[4753]: E0129 12:10:18.835528 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-dgs2s" podUID="13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6" Jan 29 12:10:19 crc kubenswrapper[4753]: I0129 12:10:19.571163 4753 status_manager.go:851] "Failed to get status for pod" podUID="d818fc13-9863-4172-a818-4e01af393842" pod="openshift-marketplace/redhat-marketplace-b52ds" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-b52ds\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:19 crc kubenswrapper[4753]: I0129 12:10:19.571927 4753 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:19 crc kubenswrapper[4753]: I0129 12:10:19.572450 4753 status_manager.go:851] "Failed to get status for pod" podUID="225f75d2-06ff-4a8e-ad48-5fb73aba9a5f" pod="openshift-marketplace/certified-operators-pcxth" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-pcxth\": dial tcp 
38.129.56.211:6443: connect: connection refused" Jan 29 12:10:19 crc kubenswrapper[4753]: I0129 12:10:19.572714 4753 status_manager.go:851] "Failed to get status for pod" podUID="a9185c9f-17d7-4e5b-84cf-4be6d5233abd" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:19 crc kubenswrapper[4753]: I0129 12:10:19.573015 4753 status_manager.go:851] "Failed to get status for pod" podUID="318aa5db-6b19-4efe-8c5d-00fbb4a84b13" pod="openshift-marketplace/redhat-marketplace-7dkr6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-7dkr6\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:19 crc kubenswrapper[4753]: I0129 12:10:19.573312 4753 status_manager.go:851] "Failed to get status for pod" podUID="574faad5-0a82-4ff3-b0a8-5390bfd3dc27" pod="openshift-marketplace/certified-operators-5clxb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-5clxb\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:19 crc kubenswrapper[4753]: I0129 12:10:19.573612 4753 status_manager.go:851] "Failed to get status for pod" podUID="13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6" pod="openshift-marketplace/redhat-operators-dgs2s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-dgs2s\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:19 crc kubenswrapper[4753]: I0129 12:10:19.574006 4753 status_manager.go:851] "Failed to get status for pod" podUID="13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6" pod="openshift-marketplace/redhat-operators-dgs2s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-dgs2s\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:19 crc kubenswrapper[4753]: I0129 12:10:19.574259 4753 status_manager.go:851] "Failed to get status for pod" podUID="d818fc13-9863-4172-a818-4e01af393842" pod="openshift-marketplace/redhat-marketplace-b52ds" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-b52ds\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:19 crc kubenswrapper[4753]: I0129 12:10:19.574508 4753 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:19 crc kubenswrapper[4753]: I0129 12:10:19.574963 4753 status_manager.go:851] "Failed to get status for pod" podUID="225f75d2-06ff-4a8e-ad48-5fb73aba9a5f" pod="openshift-marketplace/certified-operators-pcxth" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-pcxth\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:19 crc kubenswrapper[4753]: I0129 12:10:19.575270 4753 status_manager.go:851] "Failed to get status for pod" podUID="a9185c9f-17d7-4e5b-84cf-4be6d5233abd" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial 
tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:19 crc kubenswrapper[4753]: I0129 12:10:19.575729 4753 status_manager.go:851] "Failed to get status for pod" podUID="318aa5db-6b19-4efe-8c5d-00fbb4a84b13" pod="openshift-marketplace/redhat-marketplace-7dkr6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-7dkr6\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:19 crc kubenswrapper[4753]: I0129 12:10:19.576018 4753 status_manager.go:851] "Failed to get status for pod" podUID="574faad5-0a82-4ff3-b0a8-5390bfd3dc27" pod="openshift-marketplace/certified-operators-5clxb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-5clxb\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:19 crc kubenswrapper[4753]: E0129 12:10:19.987457 4753 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.211:6443: connect: connection refused" interval="3.2s" Jan 29 12:10:20 crc kubenswrapper[4753]: E0129 12:10:20.906369 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-dgs2s" podUID="13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6" Jan 29 12:10:20 crc kubenswrapper[4753]: E0129 12:10:20.906439 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-5clxb" podUID="574faad5-0a82-4ff3-b0a8-5390bfd3dc27" Jan 29 12:10:20 crc kubenswrapper[4753]: E0129 12:10:20.921789 4753 desired_state_of_world_populator.go:312] "Error processing volume" err="error processing PVC openshift-image-registry/crc-image-registry-storage: failed to fetch PVC from API server: Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/persistentvolumeclaims/crc-image-registry-storage\": dial tcp 38.129.56.211:6443: connect: connection refused" pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" volumeName="registry-storage" Jan 29 12:10:20 crc kubenswrapper[4753]: E0129 12:10:20.994776 4753 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Jan 29 12:10:20 crc kubenswrapper[4753]: E0129 12:10:20.994927 4753 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-mg8l8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-t89jd_openshift-marketplace(ce6846a0-6c85-4ae9-afae-b10ead46d21d): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 29 12:10:20 crc kubenswrapper[4753]: E0129 12:10:20.996106 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-t89jd" podUID="ce6846a0-6c85-4ae9-afae-b10ead46d21d" Jan 29 12:10:20 crc kubenswrapper[4753]: I0129 12:10:20.999135 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:20.999965 4753 status_manager.go:851] "Failed to get status for pod" podUID="225f75d2-06ff-4a8e-ad48-5fb73aba9a5f" pod="openshift-marketplace/certified-operators-pcxth" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-pcxth\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.000534 4753 status_manager.go:851] "Failed to get status for pod" podUID="318aa5db-6b19-4efe-8c5d-00fbb4a84b13" pod="openshift-marketplace/redhat-marketplace-7dkr6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-7dkr6\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.000861 4753 status_manager.go:851] "Failed to get status for pod" podUID="a9185c9f-17d7-4e5b-84cf-4be6d5233abd" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: E0129 12:10:21.000928 4753 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.001062 4753 status_manager.go:851] "Failed to get status for pod" podUID="574faad5-0a82-4ff3-b0a8-5390bfd3dc27" pod="openshift-marketplace/certified-operators-5clxb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-5clxb\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: E0129 12:10:21.001147 4753 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-zkq7z,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-bsxpd_openshift-marketplace(ae52688b-6f7a-441f-927b-ab547b7ce44f): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.001287 4753 status_manager.go:851] "Failed to get status for pod" podUID="13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6" pod="openshift-marketplace/redhat-operators-dgs2s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-dgs2s\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.001528 4753 status_manager.go:851] "Failed to get status for pod" podUID="d818fc13-9863-4172-a818-4e01af393842" pod="openshift-marketplace/redhat-marketplace-b52ds" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-b52ds\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.001792 4753 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: E0129 12:10:21.002844 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-bsxpd" podUID="ae52688b-6f7a-441f-927b-ab547b7ce44f" Jan 29 12:10:21 crc kubenswrapper[4753]: E0129 12:10:21.038463 4753 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Jan 29 12:10:21 crc kubenswrapper[4753]: E0129 12:10:21.038717 4753 
kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gmml4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-z6xk4_openshift-marketplace(d0809beb-ae87-4bf7-aa2d-20dbe819c3cc): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 29 12:10:21 crc kubenswrapper[4753]: E0129 12:10:21.039986 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-z6xk4" podUID="d0809beb-ae87-4bf7-aa2d-20dbe819c3cc" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.089736 4753 scope.go:117] "RemoveContainer" containerID="34151f1c4f743e8fa0a2c27e644bbea9c3911e2252501ad8aa95ad3e4346222a" Jan 29 12:10:21 crc kubenswrapper[4753]: W0129 12:10:21.123441 4753 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf85e55b1a89d02b0cb034b1ea31ed45a.slice/crio-29dc779c76a2c9f69c9e0cd43fad135681d6d62909631a08096204810e4e7e4c WatchSource:0}: Error finding container 29dc779c76a2c9f69c9e0cd43fad135681d6d62909631a08096204810e4e7e4c: Status 404 returned error can't find the container with id 29dc779c76a2c9f69c9e0cd43fad135681d6d62909631a08096204810e4e7e4c Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.124595 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a9185c9f-17d7-4e5b-84cf-4be6d5233abd-kubelet-dir\") pod \"a9185c9f-17d7-4e5b-84cf-4be6d5233abd\" (UID: \"a9185c9f-17d7-4e5b-84cf-4be6d5233abd\") " Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.124734 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: 
\"kubernetes.io/projected/a9185c9f-17d7-4e5b-84cf-4be6d5233abd-kube-api-access\") pod \"a9185c9f-17d7-4e5b-84cf-4be6d5233abd\" (UID: \"a9185c9f-17d7-4e5b-84cf-4be6d5233abd\") " Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.124787 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/a9185c9f-17d7-4e5b-84cf-4be6d5233abd-var-lock\") pod \"a9185c9f-17d7-4e5b-84cf-4be6d5233abd\" (UID: \"a9185c9f-17d7-4e5b-84cf-4be6d5233abd\") " Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.125186 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a9185c9f-17d7-4e5b-84cf-4be6d5233abd-var-lock" (OuterVolumeSpecName: "var-lock") pod "a9185c9f-17d7-4e5b-84cf-4be6d5233abd" (UID: "a9185c9f-17d7-4e5b-84cf-4be6d5233abd"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.125300 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a9185c9f-17d7-4e5b-84cf-4be6d5233abd-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "a9185c9f-17d7-4e5b-84cf-4be6d5233abd" (UID: "a9185c9f-17d7-4e5b-84cf-4be6d5233abd"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.132269 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.133590 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a9185c9f-17d7-4e5b-84cf-4be6d5233abd-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "a9185c9f-17d7-4e5b-84cf-4be6d5233abd" (UID: "a9185c9f-17d7-4e5b-84cf-4be6d5233abd"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.135598 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.136164 4753 status_manager.go:851] "Failed to get status for pod" podUID="225f75d2-06ff-4a8e-ad48-5fb73aba9a5f" pod="openshift-marketplace/certified-operators-pcxth" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-pcxth\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.136447 4753 status_manager.go:851] "Failed to get status for pod" podUID="a9185c9f-17d7-4e5b-84cf-4be6d5233abd" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.136644 4753 status_manager.go:851] "Failed to get status for pod" podUID="318aa5db-6b19-4efe-8c5d-00fbb4a84b13" pod="openshift-marketplace/redhat-marketplace-7dkr6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-7dkr6\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.136904 4753 status_manager.go:851] "Failed to get status for pod" podUID="574faad5-0a82-4ff3-b0a8-5390bfd3dc27" pod="openshift-marketplace/certified-operators-5clxb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-5clxb\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.137096 4753 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.137302 4753 status_manager.go:851] "Failed to get status for pod" podUID="13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6" pod="openshift-marketplace/redhat-operators-dgs2s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-dgs2s\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.137476 4753 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.137652 4753 status_manager.go:851] "Failed to get status for pod" podUID="d818fc13-9863-4172-a818-4e01af393842" pod="openshift-marketplace/redhat-marketplace-b52ds" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-b52ds\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: E0129 12:10:21.217465 4753 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/events\": dial tcp 38.129.56.211:6443: connect: connection refused" 
event="&Event{ObjectMeta:{redhat-marketplace-7dkr6.188f326aac04b32e openshift-marketplace 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-marketplace,Name:redhat-marketplace-7dkr6,UID:318aa5db-6b19-4efe-8c5d-00fbb4a84b13,APIVersion:v1,ResourceVersion:28071,FieldPath:spec.initContainers{extract-content},},Reason:Failed,Message:Failed to pull image \"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\": rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-29 12:10:12.313805614 +0000 UTC m=+226.565887069,LastTimestamp:2026-01-29 12:10:12.313805614 +0000 UTC m=+226.565887069,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.226698 4753 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a9185c9f-17d7-4e5b-84cf-4be6d5233abd-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.226729 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a9185c9f-17d7-4e5b-84cf-4be6d5233abd-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.226756 4753 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/a9185c9f-17d7-4e5b-84cf-4be6d5233abd-var-lock\") on node \"crc\" DevicePath \"\"" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.327798 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.327927 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.327972 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.328007 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.328041 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.328180 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.328514 4753 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.328533 4753 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.328545 4753 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.615435 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"a9185c9f-17d7-4e5b-84cf-4be6d5233abd","Type":"ContainerDied","Data":"b5b6dae4293eb08245393b4c968be2ebcdf0c35a4016f77610d86864fdc5ceba"} Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.615496 4753 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b5b6dae4293eb08245393b4c968be2ebcdf0c35a4016f77610d86864fdc5ceba" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.615544 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.618990 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"dbb5b279c7f8838a84b1b3b2045df0266ab38c31c624a472794f3dae74d9ddc4"} Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.619027 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"29dc779c76a2c9f69c9e0cd43fad135681d6d62909631a08096204810e4e7e4c"} Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.620083 4753 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.620505 4753 status_manager.go:851] "Failed to get status for pod" podUID="d818fc13-9863-4172-a818-4e01af393842" pod="openshift-marketplace/redhat-marketplace-b52ds" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-b52ds\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.620881 4753 status_manager.go:851] "Failed to get status for pod" podUID="225f75d2-06ff-4a8e-ad48-5fb73aba9a5f" pod="openshift-marketplace/certified-operators-pcxth" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-pcxth\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.621095 4753 status_manager.go:851] "Failed to get status for pod" podUID="a9185c9f-17d7-4e5b-84cf-4be6d5233abd" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.621410 4753 status_manager.go:851] "Failed to get status for pod" podUID="318aa5db-6b19-4efe-8c5d-00fbb4a84b13" pod="openshift-marketplace/redhat-marketplace-7dkr6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-7dkr6\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.621666 4753 status_manager.go:851] "Failed to get status for pod" podUID="574faad5-0a82-4ff3-b0a8-5390bfd3dc27" pod="openshift-marketplace/certified-operators-5clxb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-5clxb\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.621860 4753 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: 
I0129 12:10:21.622353 4753 status_manager.go:851] "Failed to get status for pod" podUID="13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6" pod="openshift-marketplace/redhat-operators-dgs2s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-dgs2s\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.624412 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-b2rb9" event={"ID":"200ffbfe-dff4-45e2-944d-34a3ad56f018","Type":"ContainerStarted","Data":"c43805542a3469d34d53377647698e9d5207db5b34b4bad078b28cea7c645389"} Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.625378 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-b2rb9" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.625414 4753 status_manager.go:851] "Failed to get status for pod" podUID="200ffbfe-dff4-45e2-944d-34a3ad56f018" pod="openshift-console/downloads-7954f5f757-b2rb9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console/pods/downloads-7954f5f757-b2rb9\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.625544 4753 patch_prober.go:28] interesting pod/downloads-7954f5f757-b2rb9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" start-of-body= Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.625928 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-b2rb9" podUID="200ffbfe-dff4-45e2-944d-34a3ad56f018" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.628000 4753 status_manager.go:851] "Failed to get status for pod" podUID="d818fc13-9863-4172-a818-4e01af393842" pod="openshift-marketplace/redhat-marketplace-b52ds" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-b52ds\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.629378 4753 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.629945 4753 status_manager.go:851] "Failed to get status for pod" podUID="225f75d2-06ff-4a8e-ad48-5fb73aba9a5f" pod="openshift-marketplace/certified-operators-pcxth" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-pcxth\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.630542 4753 status_manager.go:851] "Failed to get status for pod" podUID="318aa5db-6b19-4efe-8c5d-00fbb4a84b13" pod="openshift-marketplace/redhat-marketplace-7dkr6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-7dkr6\": dial tcp 38.129.56.211:6443: connect: connection refused" 
Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.631006 4753 status_manager.go:851] "Failed to get status for pod" podUID="a9185c9f-17d7-4e5b-84cf-4be6d5233abd" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.631640 4753 status_manager.go:851] "Failed to get status for pod" podUID="574faad5-0a82-4ff3-b0a8-5390bfd3dc27" pod="openshift-marketplace/certified-operators-5clxb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-5clxb\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.631881 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.632494 4753 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.633432 4753 status_manager.go:851] "Failed to get status for pod" podUID="13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6" pod="openshift-marketplace/redhat-operators-dgs2s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-dgs2s\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.633566 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.633580 4753 scope.go:117] "RemoveContainer" containerID="e86597c91848f265cd70410d57ac60eadba1d0322b9b897097366a41c93d8134" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.634084 4753 status_manager.go:851] "Failed to get status for pod" podUID="ce6846a0-6c85-4ae9-afae-b10ead46d21d" pod="openshift-marketplace/community-operators-t89jd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-t89jd\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.634325 4753 status_manager.go:851] "Failed to get status for pod" podUID="225f75d2-06ff-4a8e-ad48-5fb73aba9a5f" pod="openshift-marketplace/certified-operators-pcxth" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-pcxth\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.634524 4753 status_manager.go:851] "Failed to get status for pod" podUID="a9185c9f-17d7-4e5b-84cf-4be6d5233abd" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.634765 4753 status_manager.go:851] "Failed to get status for pod" podUID="318aa5db-6b19-4efe-8c5d-00fbb4a84b13" pod="openshift-marketplace/redhat-marketplace-7dkr6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-7dkr6\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.635048 4753 status_manager.go:851] "Failed to get status for pod" podUID="574faad5-0a82-4ff3-b0a8-5390bfd3dc27" pod="openshift-marketplace/certified-operators-5clxb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-5clxb\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.635759 4753 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: E0129 12:10:21.636005 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-bsxpd" podUID="ae52688b-6f7a-441f-927b-ab547b7ce44f" Jan 29 12:10:21 crc kubenswrapper[4753]: E0129 12:10:21.636077 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-t89jd" podUID="ce6846a0-6c85-4ae9-afae-b10ead46d21d" Jan 29 12:10:21 crc kubenswrapper[4753]: E0129 12:10:21.636129 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" 
for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-z6xk4" podUID="d0809beb-ae87-4bf7-aa2d-20dbe819c3cc" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.636272 4753 status_manager.go:851] "Failed to get status for pod" podUID="13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6" pod="openshift-marketplace/redhat-operators-dgs2s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-dgs2s\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.636590 4753 status_manager.go:851] "Failed to get status for pod" podUID="200ffbfe-dff4-45e2-944d-34a3ad56f018" pod="openshift-console/downloads-7954f5f757-b2rb9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console/pods/downloads-7954f5f757-b2rb9\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.636867 4753 status_manager.go:851] "Failed to get status for pod" podUID="d818fc13-9863-4172-a818-4e01af393842" pod="openshift-marketplace/redhat-marketplace-b52ds" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-b52ds\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.637142 4753 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.637482 4753 status_manager.go:851] "Failed to get status for pod" podUID="574faad5-0a82-4ff3-b0a8-5390bfd3dc27" pod="openshift-marketplace/certified-operators-5clxb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-5clxb\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.637761 4753 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.638008 4753 status_manager.go:851] "Failed to get status for pod" podUID="13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6" pod="openshift-marketplace/redhat-operators-dgs2s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-dgs2s\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.638325 4753 status_manager.go:851] "Failed to get status for pod" podUID="d0809beb-ae87-4bf7-aa2d-20dbe819c3cc" pod="openshift-marketplace/redhat-operators-z6xk4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-z6xk4\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.639605 4753 status_manager.go:851] "Failed to get status for pod" 
podUID="200ffbfe-dff4-45e2-944d-34a3ad56f018" pod="openshift-console/downloads-7954f5f757-b2rb9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console/pods/downloads-7954f5f757-b2rb9\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.639857 4753 status_manager.go:851] "Failed to get status for pod" podUID="d818fc13-9863-4172-a818-4e01af393842" pod="openshift-marketplace/redhat-marketplace-b52ds" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-b52ds\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.640070 4753 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.640351 4753 status_manager.go:851] "Failed to get status for pod" podUID="ce6846a0-6c85-4ae9-afae-b10ead46d21d" pod="openshift-marketplace/community-operators-t89jd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-t89jd\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.640662 4753 status_manager.go:851] "Failed to get status for pod" podUID="ae52688b-6f7a-441f-927b-ab547b7ce44f" pod="openshift-marketplace/community-operators-bsxpd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-bsxpd\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.643355 4753 status_manager.go:851] "Failed to get status for pod" podUID="225f75d2-06ff-4a8e-ad48-5fb73aba9a5f" pod="openshift-marketplace/certified-operators-pcxth" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-pcxth\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.643633 4753 status_manager.go:851] "Failed to get status for pod" podUID="318aa5db-6b19-4efe-8c5d-00fbb4a84b13" pod="openshift-marketplace/redhat-marketplace-7dkr6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-7dkr6\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.643905 4753 status_manager.go:851] "Failed to get status for pod" podUID="a9185c9f-17d7-4e5b-84cf-4be6d5233abd" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.650520 4753 status_manager.go:851] "Failed to get status for pod" podUID="13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6" pod="openshift-marketplace/redhat-operators-dgs2s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-dgs2s\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.650706 4753 status_manager.go:851] "Failed to 
get status for pod" podUID="d0809beb-ae87-4bf7-aa2d-20dbe819c3cc" pod="openshift-marketplace/redhat-operators-z6xk4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-z6xk4\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.650917 4753 status_manager.go:851] "Failed to get status for pod" podUID="d818fc13-9863-4172-a818-4e01af393842" pod="openshift-marketplace/redhat-marketplace-b52ds" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-b52ds\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.651183 4753 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.651418 4753 status_manager.go:851] "Failed to get status for pod" podUID="200ffbfe-dff4-45e2-944d-34a3ad56f018" pod="openshift-console/downloads-7954f5f757-b2rb9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console/pods/downloads-7954f5f757-b2rb9\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.651613 4753 status_manager.go:851] "Failed to get status for pod" podUID="ae52688b-6f7a-441f-927b-ab547b7ce44f" pod="openshift-marketplace/community-operators-bsxpd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-bsxpd\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.651814 4753 status_manager.go:851] "Failed to get status for pod" podUID="ce6846a0-6c85-4ae9-afae-b10ead46d21d" pod="openshift-marketplace/community-operators-t89jd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-t89jd\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.652001 4753 status_manager.go:851] "Failed to get status for pod" podUID="225f75d2-06ff-4a8e-ad48-5fb73aba9a5f" pod="openshift-marketplace/certified-operators-pcxth" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-pcxth\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.652347 4753 status_manager.go:851] "Failed to get status for pod" podUID="a9185c9f-17d7-4e5b-84cf-4be6d5233abd" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.652653 4753 status_manager.go:851] "Failed to get status for pod" podUID="318aa5db-6b19-4efe-8c5d-00fbb4a84b13" pod="openshift-marketplace/redhat-marketplace-7dkr6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-7dkr6\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.652644 4753 scope.go:117] 
"RemoveContainer" containerID="7e02d977e34888952623f4ced5e073d7bf1ff360f8deb435dc577d2ecf146cb2" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.652928 4753 status_manager.go:851] "Failed to get status for pod" podUID="574faad5-0a82-4ff3-b0a8-5390bfd3dc27" pod="openshift-marketplace/certified-operators-5clxb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-5clxb\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.653382 4753 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.669804 4753 scope.go:117] "RemoveContainer" containerID="7af8be0836a8ff7d08ff48d9487acdedd4b83127538d071d13b3215400c0f8de" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.687943 4753 scope.go:117] "RemoveContainer" containerID="91d57a9cede640bdcec325175598b72dd00398436b730248728c7d9afd545b1e" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.704490 4753 scope.go:117] "RemoveContainer" containerID="cab3ab80a8fddbc591cd07fdbed800f525bb8a1b5505bf818ebb59cbcce73003" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.723183 4753 scope.go:117] "RemoveContainer" containerID="6538a934d097cd5a295952f64961a0a8fb5bfa5ebcd27a0a4dd3638029b9cdb1" Jan 29 12:10:21 crc kubenswrapper[4753]: I0129 12:10:21.910463 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Jan 29 12:10:22 crc kubenswrapper[4753]: I0129 12:10:22.646030 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Jan 29 12:10:22 crc kubenswrapper[4753]: I0129 12:10:22.646109 4753 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="b4fd4169036d3a646c9e5e4bfa8d86a92c557c3d8f77608ae5308dd0b8c45517" exitCode=1 Jan 29 12:10:22 crc kubenswrapper[4753]: I0129 12:10:22.646210 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"b4fd4169036d3a646c9e5e4bfa8d86a92c557c3d8f77608ae5308dd0b8c45517"} Jan 29 12:10:22 crc kubenswrapper[4753]: I0129 12:10:22.646906 4753 scope.go:117] "RemoveContainer" containerID="b4fd4169036d3a646c9e5e4bfa8d86a92c557c3d8f77608ae5308dd0b8c45517" Jan 29 12:10:22 crc kubenswrapper[4753]: I0129 12:10:22.647209 4753 patch_prober.go:28] interesting pod/downloads-7954f5f757-b2rb9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" start-of-body= Jan 29 12:10:22 crc kubenswrapper[4753]: I0129 12:10:22.647271 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-b2rb9" podUID="200ffbfe-dff4-45e2-944d-34a3ad56f018" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" 
Jan 29 12:10:22 crc kubenswrapper[4753]: I0129 12:10:22.647553 4753 status_manager.go:851] "Failed to get status for pod" podUID="13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6" pod="openshift-marketplace/redhat-operators-dgs2s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-dgs2s\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:22 crc kubenswrapper[4753]: I0129 12:10:22.648861 4753 status_manager.go:851] "Failed to get status for pod" podUID="d0809beb-ae87-4bf7-aa2d-20dbe819c3cc" pod="openshift-marketplace/redhat-operators-z6xk4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-z6xk4\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:22 crc kubenswrapper[4753]: I0129 12:10:22.649634 4753 status_manager.go:851] "Failed to get status for pod" podUID="200ffbfe-dff4-45e2-944d-34a3ad56f018" pod="openshift-console/downloads-7954f5f757-b2rb9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console/pods/downloads-7954f5f757-b2rb9\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:22 crc kubenswrapper[4753]: I0129 12:10:22.649961 4753 status_manager.go:851] "Failed to get status for pod" podUID="d818fc13-9863-4172-a818-4e01af393842" pod="openshift-marketplace/redhat-marketplace-b52ds" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-b52ds\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:22 crc kubenswrapper[4753]: I0129 12:10:22.650152 4753 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:22 crc kubenswrapper[4753]: I0129 12:10:22.650386 4753 status_manager.go:851] "Failed to get status for pod" podUID="ce6846a0-6c85-4ae9-afae-b10ead46d21d" pod="openshift-marketplace/community-operators-t89jd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-t89jd\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:22 crc kubenswrapper[4753]: I0129 12:10:22.650587 4753 status_manager.go:851] "Failed to get status for pod" podUID="ae52688b-6f7a-441f-927b-ab547b7ce44f" pod="openshift-marketplace/community-operators-bsxpd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-bsxpd\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:22 crc kubenswrapper[4753]: I0129 12:10:22.650748 4753 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:22 crc kubenswrapper[4753]: I0129 12:10:22.651053 4753 status_manager.go:851] "Failed to get status for pod" podUID="225f75d2-06ff-4a8e-ad48-5fb73aba9a5f" pod="openshift-marketplace/certified-operators-pcxth" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-pcxth\": dial 
tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:22 crc kubenswrapper[4753]: I0129 12:10:22.651443 4753 status_manager.go:851] "Failed to get status for pod" podUID="a9185c9f-17d7-4e5b-84cf-4be6d5233abd" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:22 crc kubenswrapper[4753]: I0129 12:10:22.651762 4753 status_manager.go:851] "Failed to get status for pod" podUID="318aa5db-6b19-4efe-8c5d-00fbb4a84b13" pod="openshift-marketplace/redhat-marketplace-7dkr6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-7dkr6\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:22 crc kubenswrapper[4753]: I0129 12:10:22.652147 4753 status_manager.go:851] "Failed to get status for pod" podUID="574faad5-0a82-4ff3-b0a8-5390bfd3dc27" pod="openshift-marketplace/certified-operators-5clxb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-5clxb\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:22 crc kubenswrapper[4753]: I0129 12:10:22.887790 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 12:10:22 crc kubenswrapper[4753]: I0129 12:10:22.889033 4753 status_manager.go:851] "Failed to get status for pod" podUID="200ffbfe-dff4-45e2-944d-34a3ad56f018" pod="openshift-console/downloads-7954f5f757-b2rb9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console/pods/downloads-7954f5f757-b2rb9\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:22 crc kubenswrapper[4753]: I0129 12:10:22.889305 4753 status_manager.go:851] "Failed to get status for pod" podUID="d818fc13-9863-4172-a818-4e01af393842" pod="openshift-marketplace/redhat-marketplace-b52ds" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-b52ds\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:22 crc kubenswrapper[4753]: I0129 12:10:22.889580 4753 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:22 crc kubenswrapper[4753]: I0129 12:10:22.889888 4753 status_manager.go:851] "Failed to get status for pod" podUID="ce6846a0-6c85-4ae9-afae-b10ead46d21d" pod="openshift-marketplace/community-operators-t89jd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-t89jd\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:22 crc kubenswrapper[4753]: I0129 12:10:22.890143 4753 status_manager.go:851] "Failed to get status for pod" podUID="ae52688b-6f7a-441f-927b-ab547b7ce44f" pod="openshift-marketplace/community-operators-bsxpd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-bsxpd\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:22 crc kubenswrapper[4753]: I0129 12:10:22.890581 4753 status_manager.go:851] "Failed to get status for pod" 
podUID="225f75d2-06ff-4a8e-ad48-5fb73aba9a5f" pod="openshift-marketplace/certified-operators-pcxth" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-pcxth\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:22 crc kubenswrapper[4753]: I0129 12:10:22.890777 4753 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:22 crc kubenswrapper[4753]: I0129 12:10:22.890934 4753 status_manager.go:851] "Failed to get status for pod" podUID="a9185c9f-17d7-4e5b-84cf-4be6d5233abd" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:22 crc kubenswrapper[4753]: I0129 12:10:22.891075 4753 status_manager.go:851] "Failed to get status for pod" podUID="318aa5db-6b19-4efe-8c5d-00fbb4a84b13" pod="openshift-marketplace/redhat-marketplace-7dkr6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-7dkr6\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:22 crc kubenswrapper[4753]: I0129 12:10:22.891306 4753 status_manager.go:851] "Failed to get status for pod" podUID="574faad5-0a82-4ff3-b0a8-5390bfd3dc27" pod="openshift-marketplace/certified-operators-5clxb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-5clxb\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:22 crc kubenswrapper[4753]: I0129 12:10:22.891471 4753 status_manager.go:851] "Failed to get status for pod" podUID="13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6" pod="openshift-marketplace/redhat-operators-dgs2s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-dgs2s\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:22 crc kubenswrapper[4753]: I0129 12:10:22.891628 4753 status_manager.go:851] "Failed to get status for pod" podUID="d0809beb-ae87-4bf7-aa2d-20dbe819c3cc" pod="openshift-marketplace/redhat-operators-z6xk4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-z6xk4\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:22 crc kubenswrapper[4753]: I0129 12:10:22.904901 4753 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="703a6b9d-a15d-4ed3-b00b-db7bd5d42c61" Jan 29 12:10:22 crc kubenswrapper[4753]: I0129 12:10:22.904962 4753 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="703a6b9d-a15d-4ed3-b00b-db7bd5d42c61" Jan 29 12:10:22 crc kubenswrapper[4753]: E0129 12:10:22.905293 4753 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.129.56.211:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 12:10:22 crc kubenswrapper[4753]: I0129 12:10:22.906738 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 12:10:22 crc kubenswrapper[4753]: W0129 12:10:22.954646 4753 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod71bb4a3aecc4ba5b26c4b7318770ce13.slice/crio-05120ee6418095ed16c8e9df78ff08dd0f3e6d36c9746052b4987a981f2d0406 WatchSource:0}: Error finding container 05120ee6418095ed16c8e9df78ff08dd0f3e6d36c9746052b4987a981f2d0406: Status 404 returned error can't find the container with id 05120ee6418095ed16c8e9df78ff08dd0f3e6d36c9746052b4987a981f2d0406 Jan 29 12:10:23 crc kubenswrapper[4753]: E0129 12:10:23.189023 4753 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.211:6443: connect: connection refused" interval="6.4s" Jan 29 12:10:23 crc kubenswrapper[4753]: I0129 12:10:23.237458 4753 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 12:10:23 crc kubenswrapper[4753]: I0129 12:10:23.659300 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Jan 29 12:10:23 crc kubenswrapper[4753]: I0129 12:10:23.659450 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"0ae904672ac068618f206453f0f788c8b4c98cc411572da88fb7e68e4e64e9f3"} Jan 29 12:10:23 crc kubenswrapper[4753]: I0129 12:10:23.661519 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"05120ee6418095ed16c8e9df78ff08dd0f3e6d36c9746052b4987a981f2d0406"} Jan 29 12:10:23 crc kubenswrapper[4753]: I0129 12:10:23.662429 4753 patch_prober.go:28] interesting pod/downloads-7954f5f757-b2rb9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" start-of-body= Jan 29 12:10:23 crc kubenswrapper[4753]: I0129 12:10:23.662486 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-b2rb9" podUID="200ffbfe-dff4-45e2-944d-34a3ad56f018" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" Jan 29 12:10:24 crc kubenswrapper[4753]: I0129 12:10:24.668129 4753 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="6c9b7bd65bd63d3a59221ed4ed488b29bc32dfb914964e75db7973095124dba1" exitCode=0 Jan 29 12:10:24 crc kubenswrapper[4753]: I0129 12:10:24.668314 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"6c9b7bd65bd63d3a59221ed4ed488b29bc32dfb914964e75db7973095124dba1"} Jan 29 12:10:24 crc kubenswrapper[4753]: I0129 12:10:24.669332 4753 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="703a6b9d-a15d-4ed3-b00b-db7bd5d42c61" Jan 29 12:10:24 crc 
kubenswrapper[4753]: I0129 12:10:24.669349 4753 status_manager.go:851] "Failed to get status for pod" podUID="13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6" pod="openshift-marketplace/redhat-operators-dgs2s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-dgs2s\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:24 crc kubenswrapper[4753]: I0129 12:10:24.669371 4753 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="703a6b9d-a15d-4ed3-b00b-db7bd5d42c61" Jan 29 12:10:24 crc kubenswrapper[4753]: E0129 12:10:24.669681 4753 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.129.56.211:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 12:10:24 crc kubenswrapper[4753]: I0129 12:10:24.669726 4753 status_manager.go:851] "Failed to get status for pod" podUID="d0809beb-ae87-4bf7-aa2d-20dbe819c3cc" pod="openshift-marketplace/redhat-operators-z6xk4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-z6xk4\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:24 crc kubenswrapper[4753]: I0129 12:10:24.670158 4753 status_manager.go:851] "Failed to get status for pod" podUID="200ffbfe-dff4-45e2-944d-34a3ad56f018" pod="openshift-console/downloads-7954f5f757-b2rb9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console/pods/downloads-7954f5f757-b2rb9\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:24 crc kubenswrapper[4753]: I0129 12:10:24.670997 4753 status_manager.go:851] "Failed to get status for pod" podUID="d818fc13-9863-4172-a818-4e01af393842" pod="openshift-marketplace/redhat-marketplace-b52ds" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-b52ds\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:24 crc kubenswrapper[4753]: I0129 12:10:24.671288 4753 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:24 crc kubenswrapper[4753]: I0129 12:10:24.671765 4753 status_manager.go:851] "Failed to get status for pod" podUID="ce6846a0-6c85-4ae9-afae-b10ead46d21d" pod="openshift-marketplace/community-operators-t89jd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-t89jd\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:24 crc kubenswrapper[4753]: I0129 12:10:24.672069 4753 status_manager.go:851] "Failed to get status for pod" podUID="ae52688b-6f7a-441f-927b-ab547b7ce44f" pod="openshift-marketplace/community-operators-bsxpd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-bsxpd\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:24 crc kubenswrapper[4753]: I0129 12:10:24.672339 4753 status_manager.go:851] "Failed to get status for pod" podUID="225f75d2-06ff-4a8e-ad48-5fb73aba9a5f" pod="openshift-marketplace/certified-operators-pcxth" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-pcxth\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:24 crc kubenswrapper[4753]: I0129 12:10:24.672660 4753 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:24 crc kubenswrapper[4753]: I0129 12:10:24.672942 4753 status_manager.go:851] "Failed to get status for pod" podUID="318aa5db-6b19-4efe-8c5d-00fbb4a84b13" pod="openshift-marketplace/redhat-marketplace-7dkr6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-7dkr6\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:24 crc kubenswrapper[4753]: I0129 12:10:24.673205 4753 status_manager.go:851] "Failed to get status for pod" podUID="a9185c9f-17d7-4e5b-84cf-4be6d5233abd" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:24 crc kubenswrapper[4753]: I0129 12:10:24.673498 4753 status_manager.go:851] "Failed to get status for pod" podUID="574faad5-0a82-4ff3-b0a8-5390bfd3dc27" pod="openshift-marketplace/certified-operators-5clxb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-5clxb\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:24 crc kubenswrapper[4753]: I0129 12:10:24.673930 4753 status_manager.go:851] "Failed to get status for pod" podUID="574faad5-0a82-4ff3-b0a8-5390bfd3dc27" pod="openshift-marketplace/certified-operators-5clxb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-5clxb\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:24 crc kubenswrapper[4753]: I0129 12:10:24.674284 4753 status_manager.go:851] "Failed to get status for pod" podUID="13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6" pod="openshift-marketplace/redhat-operators-dgs2s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-dgs2s\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:24 crc kubenswrapper[4753]: I0129 12:10:24.674575 4753 status_manager.go:851] "Failed to get status for pod" podUID="d0809beb-ae87-4bf7-aa2d-20dbe819c3cc" pod="openshift-marketplace/redhat-operators-z6xk4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-z6xk4\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:24 crc kubenswrapper[4753]: I0129 12:10:24.674855 4753 status_manager.go:851] "Failed to get status for pod" podUID="200ffbfe-dff4-45e2-944d-34a3ad56f018" pod="openshift-console/downloads-7954f5f757-b2rb9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console/pods/downloads-7954f5f757-b2rb9\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:24 crc kubenswrapper[4753]: I0129 12:10:24.675102 4753 status_manager.go:851] "Failed to get status for pod" podUID="d818fc13-9863-4172-a818-4e01af393842" 
pod="openshift-marketplace/redhat-marketplace-b52ds" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-b52ds\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:24 crc kubenswrapper[4753]: I0129 12:10:24.675389 4753 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:24 crc kubenswrapper[4753]: I0129 12:10:24.675687 4753 status_manager.go:851] "Failed to get status for pod" podUID="ce6846a0-6c85-4ae9-afae-b10ead46d21d" pod="openshift-marketplace/community-operators-t89jd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-t89jd\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:24 crc kubenswrapper[4753]: I0129 12:10:24.675984 4753 status_manager.go:851] "Failed to get status for pod" podUID="ae52688b-6f7a-441f-927b-ab547b7ce44f" pod="openshift-marketplace/community-operators-bsxpd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-bsxpd\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:24 crc kubenswrapper[4753]: I0129 12:10:24.676503 4753 status_manager.go:851] "Failed to get status for pod" podUID="225f75d2-06ff-4a8e-ad48-5fb73aba9a5f" pod="openshift-marketplace/certified-operators-pcxth" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-pcxth\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:24 crc kubenswrapper[4753]: I0129 12:10:24.676804 4753 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:24 crc kubenswrapper[4753]: I0129 12:10:24.677054 4753 status_manager.go:851] "Failed to get status for pod" podUID="318aa5db-6b19-4efe-8c5d-00fbb4a84b13" pod="openshift-marketplace/redhat-marketplace-7dkr6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-7dkr6\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:24 crc kubenswrapper[4753]: I0129 12:10:24.677380 4753 status_manager.go:851] "Failed to get status for pod" podUID="a9185c9f-17d7-4e5b-84cf-4be6d5233abd" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.211:6443: connect: connection refused" Jan 29 12:10:26 crc kubenswrapper[4753]: I0129 12:10:26.680890 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"165686845ae8c2eb5f5e7102800a0dd5d3523ffb37d7860f5cfad40d0e14cc2a"} Jan 29 12:10:27 crc kubenswrapper[4753]: I0129 12:10:27.688817 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" 
event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"b0e08384f5f4406cd22ffdbf572065fba37c00ea4e7cef1aa64cd86b123b1bd1"} Jan 29 12:10:28 crc kubenswrapper[4753]: I0129 12:10:28.518514 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 12:10:28 crc kubenswrapper[4753]: I0129 12:10:28.519357 4753 patch_prober.go:28] interesting pod/downloads-7954f5f757-b2rb9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" start-of-body= Jan 29 12:10:28 crc kubenswrapper[4753]: I0129 12:10:28.519595 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-b2rb9" podUID="200ffbfe-dff4-45e2-944d-34a3ad56f018" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" Jan 29 12:10:28 crc kubenswrapper[4753]: I0129 12:10:28.519870 4753 patch_prober.go:28] interesting pod/downloads-7954f5f757-b2rb9 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" start-of-body= Jan 29 12:10:28 crc kubenswrapper[4753]: I0129 12:10:28.522315 4753 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-b2rb9" podUID="200ffbfe-dff4-45e2-944d-34a3ad56f018" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" Jan 29 12:10:28 crc kubenswrapper[4753]: I0129 12:10:28.976060 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"c44732c2e6badf7df25962e0416906db5a74da9ba3b377649d20ae0f2b888d6f"} Jan 29 12:10:29 crc kubenswrapper[4753]: I0129 12:10:29.252518 4753 patch_prober.go:28] interesting pod/machine-config-daemon-7c24x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 12:10:29 crc kubenswrapper[4753]: I0129 12:10:29.252626 4753 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 12:10:29 crc kubenswrapper[4753]: I0129 12:10:29.252689 4753 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" Jan 29 12:10:29 crc kubenswrapper[4753]: I0129 12:10:29.253451 4753 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"8391a68625fc3d8fdbc4f16b0b7b25e359798d08ce65ad8f482722910873418a"} pod="openshift-machine-config-operator/machine-config-daemon-7c24x" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 12:10:29 crc kubenswrapper[4753]: I0129 12:10:29.253555 4753 kuberuntime_container.go:808] "Killing 
container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" containerName="machine-config-daemon" containerID="cri-o://8391a68625fc3d8fdbc4f16b0b7b25e359798d08ce65ad8f482722910873418a" gracePeriod=600 Jan 29 12:10:30 crc kubenswrapper[4753]: I0129 12:10:30.017551 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"287ba01c90e5b1d57ac9b7bc0f8e1114fe87b6163d748a2f6b82e72fcde1c1eb"} Jan 29 12:10:30 crc kubenswrapper[4753]: I0129 12:10:30.034334 4753 generic.go:334] "Generic (PLEG): container finished" podID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" containerID="8391a68625fc3d8fdbc4f16b0b7b25e359798d08ce65ad8f482722910873418a" exitCode=0 Jan 29 12:10:30 crc kubenswrapper[4753]: I0129 12:10:30.034397 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" event={"ID":"b0310995-a7c7-47c3-ae6c-05daaaba92a6","Type":"ContainerDied","Data":"8391a68625fc3d8fdbc4f16b0b7b25e359798d08ce65ad8f482722910873418a"} Jan 29 12:10:31 crc kubenswrapper[4753]: I0129 12:10:31.805404 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 12:10:31 crc kubenswrapper[4753]: I0129 12:10:31.853017 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 12:10:33 crc kubenswrapper[4753]: I0129 12:10:33.181891 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"8c6afa9dce479d4a6e0a17b776d76870ea9ad8d065b1b315574136a98e227ea3"} Jan 29 12:10:33 crc kubenswrapper[4753]: I0129 12:10:33.182749 4753 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="703a6b9d-a15d-4ed3-b00b-db7bd5d42c61" Jan 29 12:10:33 crc kubenswrapper[4753]: I0129 12:10:33.182784 4753 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="703a6b9d-a15d-4ed3-b00b-db7bd5d42c61" Jan 29 12:10:33 crc kubenswrapper[4753]: I0129 12:10:33.183084 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 12:10:33 crc kubenswrapper[4753]: I0129 12:10:33.199622 4753 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 12:10:33 crc kubenswrapper[4753]: I0129 12:10:33.206313 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" event={"ID":"b0310995-a7c7-47c3-ae6c-05daaaba92a6","Type":"ContainerStarted","Data":"427d33b9cdf4ec7ff547d57d07b232e8fe7e1dc88d7955ba9130fef447076573"} Jan 29 12:10:33 crc kubenswrapper[4753]: I0129 12:10:33.217906 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 12:10:33 crc kubenswrapper[4753]: I0129 12:10:33.459439 4753 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="6a7a3a6a-3445-4c0c-b036-574be0d83106" Jan 29 12:10:34 crc 
kubenswrapper[4753]: I0129 12:10:34.224594 4753 generic.go:334] "Generic (PLEG): container finished" podID="d818fc13-9863-4172-a818-4e01af393842" containerID="5f1e9c6fe1e30e2b371f1da5742b90fcbdfdddff41df4ea5c51ceaac7f7afef8" exitCode=0 Jan 29 12:10:34 crc kubenswrapper[4753]: I0129 12:10:34.224785 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b52ds" event={"ID":"d818fc13-9863-4172-a818-4e01af393842","Type":"ContainerDied","Data":"5f1e9c6fe1e30e2b371f1da5742b90fcbdfdddff41df4ea5c51ceaac7f7afef8"} Jan 29 12:10:34 crc kubenswrapper[4753]: I0129 12:10:34.230247 4753 generic.go:334] "Generic (PLEG): container finished" podID="318aa5db-6b19-4efe-8c5d-00fbb4a84b13" containerID="451894115edc3e13f3ac8ed812be9d2d2711bdfb9dbacf7d456c803b3d386f87" exitCode=0 Jan 29 12:10:34 crc kubenswrapper[4753]: I0129 12:10:34.231429 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7dkr6" event={"ID":"318aa5db-6b19-4efe-8c5d-00fbb4a84b13","Type":"ContainerDied","Data":"451894115edc3e13f3ac8ed812be9d2d2711bdfb9dbacf7d456c803b3d386f87"} Jan 29 12:10:34 crc kubenswrapper[4753]: I0129 12:10:34.232330 4753 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="703a6b9d-a15d-4ed3-b00b-db7bd5d42c61" Jan 29 12:10:34 crc kubenswrapper[4753]: I0129 12:10:34.232367 4753 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="703a6b9d-a15d-4ed3-b00b-db7bd5d42c61" Jan 29 12:10:34 crc kubenswrapper[4753]: I0129 12:10:34.316083 4753 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="6a7a3a6a-3445-4c0c-b036-574be0d83106" Jan 29 12:10:37 crc kubenswrapper[4753]: I0129 12:10:37.666102 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7dkr6" event={"ID":"318aa5db-6b19-4efe-8c5d-00fbb4a84b13","Type":"ContainerStarted","Data":"7e5b472b91c06d451cc7b21999fbe372bbc0b9e7274b74bb9d6f37e446cbab33"} Jan 29 12:10:38 crc kubenswrapper[4753]: I0129 12:10:38.564995 4753 patch_prober.go:28] interesting pod/downloads-7954f5f757-b2rb9 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" start-of-body= Jan 29 12:10:38 crc kubenswrapper[4753]: I0129 12:10:38.565455 4753 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-b2rb9" podUID="200ffbfe-dff4-45e2-944d-34a3ad56f018" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" Jan 29 12:10:38 crc kubenswrapper[4753]: I0129 12:10:38.565134 4753 patch_prober.go:28] interesting pod/downloads-7954f5f757-b2rb9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" start-of-body= Jan 29 12:10:38 crc kubenswrapper[4753]: I0129 12:10:38.565872 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-b2rb9" podUID="200ffbfe-dff4-45e2-944d-34a3ad56f018" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection 
refused" Jan 29 12:10:38 crc kubenswrapper[4753]: I0129 12:10:38.681792 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dgs2s" event={"ID":"13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6","Type":"ContainerStarted","Data":"94ba13ac0c6370a35cda4438762c0ee8998bc9580dfd603301d8a0ac86f5f218"} Jan 29 12:10:38 crc kubenswrapper[4753]: I0129 12:10:38.688500 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b52ds" event={"ID":"d818fc13-9863-4172-a818-4e01af393842","Type":"ContainerStarted","Data":"b67b50d642d7d5f59a4bf6aad1af7773ac9559c76201c8079ec071a120ab7a49"} Jan 29 12:10:38 crc kubenswrapper[4753]: I0129 12:10:38.694337 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-z6xk4" event={"ID":"d0809beb-ae87-4bf7-aa2d-20dbe819c3cc","Type":"ContainerStarted","Data":"e7d2dbf70e86b9f470e3feb4e7b019ea755da1a818acc3e33e140b506b174c6e"} Jan 29 12:10:38 crc kubenswrapper[4753]: I0129 12:10:38.703469 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pcxth" event={"ID":"225f75d2-06ff-4a8e-ad48-5fb73aba9a5f","Type":"ContainerStarted","Data":"5ea372da213825ce784805ec1157049fbc3a85f84ec64a4a69d94a53be8e9f93"} Jan 29 12:10:38 crc kubenswrapper[4753]: I0129 12:10:38.710854 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5clxb" event={"ID":"574faad5-0a82-4ff3-b0a8-5390bfd3dc27","Type":"ContainerStarted","Data":"709fb49800c42dec000456fee3a98f0284de9438dd16d8dc91bfa0915053f2bc"} Jan 29 12:10:38 crc kubenswrapper[4753]: I0129 12:10:38.722146 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bsxpd" event={"ID":"ae52688b-6f7a-441f-927b-ab547b7ce44f","Type":"ContainerStarted","Data":"5432dfc5e349fdc2fce3a60c429c9129f30518e3f8c21a676dc46b7d8cc1879c"} Jan 29 12:10:41 crc kubenswrapper[4753]: I0129 12:10:41.911381 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-b52ds" Jan 29 12:10:41 crc kubenswrapper[4753]: I0129 12:10:41.932061 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-b52ds" Jan 29 12:10:44 crc kubenswrapper[4753]: I0129 12:10:44.528024 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Jan 29 12:10:45 crc kubenswrapper[4753]: I0129 12:10:45.028930 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Jan 29 12:10:45 crc kubenswrapper[4753]: I0129 12:10:45.130827 4753 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Jan 29 12:10:45 crc kubenswrapper[4753]: I0129 12:10:45.136903 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Jan 29 12:10:45 crc kubenswrapper[4753]: I0129 12:10:45.205870 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Jan 29 12:10:46 crc kubenswrapper[4753]: I0129 12:10:46.887046 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-7dkr6" Jan 29 12:10:46 crc kubenswrapper[4753]: I0129 12:10:46.887328 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-marketplace/redhat-marketplace-7dkr6" Jan 29 12:10:46 crc kubenswrapper[4753]: I0129 12:10:46.971551 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Jan 29 12:10:46 crc kubenswrapper[4753]: I0129 12:10:46.981861 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Jan 29 12:10:46 crc kubenswrapper[4753]: I0129 12:10:46.989446 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Jan 29 12:10:46 crc kubenswrapper[4753]: I0129 12:10:46.989781 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Jan 29 12:10:46 crc kubenswrapper[4753]: I0129 12:10:46.990084 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Jan 29 12:10:47 crc kubenswrapper[4753]: I0129 12:10:47.533547 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-b52ds" podUID="d818fc13-9863-4172-a818-4e01af393842" containerName="registry-server" probeResult="failure" output="command timed out" Jan 29 12:10:47 crc kubenswrapper[4753]: I0129 12:10:47.535854 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Jan 29 12:10:48 crc kubenswrapper[4753]: I0129 12:10:48.114908 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Jan 29 12:10:48 crc kubenswrapper[4753]: I0129 12:10:48.115508 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Jan 29 12:10:48 crc kubenswrapper[4753]: I0129 12:10:48.115891 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Jan 29 12:10:48 crc kubenswrapper[4753]: I0129 12:10:48.115964 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Jan 29 12:10:50 crc kubenswrapper[4753]: I0129 12:10:48.124570 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Jan 29 12:10:50 crc kubenswrapper[4753]: I0129 12:10:50.265867 4753 patch_prober.go:28] interesting pod/downloads-7954f5f757-b2rb9 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" start-of-body= Jan 29 12:10:50 crc kubenswrapper[4753]: I0129 12:10:50.265929 4753 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-b2rb9" podUID="200ffbfe-dff4-45e2-944d-34a3ad56f018" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" Jan 29 12:10:50 crc kubenswrapper[4753]: I0129 12:10:50.266016 4753 patch_prober.go:28] interesting pod/downloads-7954f5f757-b2rb9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" start-of-body= Jan 29 12:10:50 crc kubenswrapper[4753]: I0129 12:10:50.266036 4753 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openshift-console/downloads-7954f5f757-b2rb9" podUID="200ffbfe-dff4-45e2-944d-34a3ad56f018" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" Jan 29 12:10:51 crc kubenswrapper[4753]: I0129 12:10:51.835635 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Jan 29 12:10:51 crc kubenswrapper[4753]: I0129 12:10:51.836661 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Jan 29 12:10:51 crc kubenswrapper[4753]: I0129 12:10:51.836895 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Jan 29 12:10:51 crc kubenswrapper[4753]: I0129 12:10:51.837123 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Jan 29 12:10:51 crc kubenswrapper[4753]: I0129 12:10:51.837344 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Jan 29 12:10:51 crc kubenswrapper[4753]: I0129 12:10:51.837560 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Jan 29 12:10:51 crc kubenswrapper[4753]: I0129 12:10:51.837743 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 29 12:10:51 crc kubenswrapper[4753]: I0129 12:10:51.837978 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Jan 29 12:10:51 crc kubenswrapper[4753]: I0129 12:10:51.838166 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Jan 29 12:10:51 crc kubenswrapper[4753]: I0129 12:10:51.838843 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Jan 29 12:10:51 crc kubenswrapper[4753]: I0129 12:10:51.839274 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Jan 29 12:10:51 crc kubenswrapper[4753]: I0129 12:10:51.839549 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Jan 29 12:10:51 crc kubenswrapper[4753]: I0129 12:10:51.839778 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Jan 29 12:10:51 crc kubenswrapper[4753]: I0129 12:10:51.839982 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Jan 29 12:10:51 crc kubenswrapper[4753]: I0129 12:10:51.840249 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Jan 29 12:10:51 crc kubenswrapper[4753]: I0129 12:10:51.840477 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Jan 29 12:10:51 crc kubenswrapper[4753]: I0129 12:10:51.840589 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Jan 29 12:10:51 crc kubenswrapper[4753]: I0129 12:10:51.840742 4753 reflector.go:368] Caches 
populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Jan 29 12:10:51 crc kubenswrapper[4753]: I0129 12:10:51.840923 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Jan 29 12:10:51 crc kubenswrapper[4753]: I0129 12:10:51.843934 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Jan 29 12:10:51 crc kubenswrapper[4753]: I0129 12:10:51.844218 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.221930 4753 patch_prober.go:28] interesting pod/route-controller-manager-7657498d8f-596p2 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.60:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.222009 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-7657498d8f-596p2" podUID="bbffd8b6-cc70-40c7-a5fa-bb36544deb3b" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.60:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.222551 4753 patch_prober.go:28] interesting pod/route-controller-manager-7657498d8f-596p2 container/route-controller-manager namespace/openshift-route-controller-manager: Liveness probe status=failure output="Get \"https://10.217.0.60:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.222575 4753 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-route-controller-manager/route-controller-manager-7657498d8f-596p2" podUID="bbffd8b6-cc70-40c7-a5fa-bb36544deb3b" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.60:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.222619 4753 patch_prober.go:28] interesting pod/console-f9d7485db-mql86 container/console namespace/openshift-console: Readiness probe status=failure output="Get \"https://10.217.0.15:8443/health\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.222690 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/console-f9d7485db-mql86" podUID="91845c42-ac54-4831-b8ac-73737902b703" containerName="console" probeResult="failure" output="Get \"https://10.217.0.15:8443/health\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.263672 4753 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-zdhjn container/oauth-openshift namespace/openshift-authentication: Liveness probe status=failure output="Get \"https://10.217.0.12:6443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.263787 4753 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn" podUID="9728fd7e-6203-4082-9297-2d3fd9e17b74" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.12:6443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.263973 4753 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-f6z5s container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.11:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.264002 4753 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-f6z5s" podUID="4b74c52f-102d-45ae-a789-0c43429e8aa0" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.11:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.490761 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.490977 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.491148 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.491383 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.491500 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.491141 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.491654 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.491646 4753 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-79sb7 container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.33:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.491706 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-79sb7" podUID="40dd9f5b-066f-4400-a4a1-6a7a9eda8c90" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.33:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.492016 4753 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-f6z5s container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get 
\"https://10.217.0.11:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.492040 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-f6z5s" podUID="4b74c52f-102d-45ae-a789-0c43429e8aa0" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.11:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.505734 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-7dkr6" podUID="318aa5db-6b19-4efe-8c5d-00fbb4a84b13" containerName="registry-server" probeResult="failure" output=< Jan 29 12:10:52 crc kubenswrapper[4753]: timeout: failed to connect service ":50051" within 1s Jan 29 12:10:52 crc kubenswrapper[4753]: > Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.538097 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.539087 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.539422 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.539743 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.540014 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.540335 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.540628 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.540882 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.543005 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.584866 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.585218 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.585276 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.585521 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.585593 4753 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-authentication-operator"/"serving-cert" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.585720 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.585860 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.586033 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.586191 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.586238 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.586250 4753 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.586285 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.586304 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.586381 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.586503 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.586567 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.586598 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.586677 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.586778 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.586600 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.586850 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.586923 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.587000 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.587079 4753 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-service-ca"/"kube-root-ca.crt" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.587132 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.587459 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.587884 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Jan 29 12:10:52 crc kubenswrapper[4753]: E0129 12:10:52.612596 4753 kubelet.go:2526] "Housekeeping took longer than expected" err="housekeeping took too long" expected="1s" actual="2.346s" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.660921 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.661170 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.661271 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.661591 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.661977 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.662171 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.662336 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.663085 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.677033 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.677266 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.677397 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.677557 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.677668 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.677896 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.680795 4753 
reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.705471 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.707699 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.708440 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.708876 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.708924 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.709060 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.709613 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.709816 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.709948 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.710095 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.710297 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.710322 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.710343 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.710483 4753 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.710493 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.710587 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.710607 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.710675 4753 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-authentication"/"v4-0-config-system-serving-cert" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.710687 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.710756 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.710761 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.710793 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.710831 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.710868 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.710881 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.710928 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.710962 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.710687 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.711019 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.711068 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.711084 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.711134 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.711150 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.711258 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.711376 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.711460 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.711549 4753 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-multus"/"multus-admission-controller-secret" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.711671 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.717242 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.719188 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.720665 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.730064 4753 generic.go:334] "Generic (PLEG): container finished" podID="574faad5-0a82-4ff3-b0a8-5390bfd3dc27" containerID="709fb49800c42dec000456fee3a98f0284de9438dd16d8dc91bfa0915053f2bc" exitCode=0 Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.805875 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.806889 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.807705 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.822938 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.823389 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Jan 29 12:10:52 crc kubenswrapper[4753]: I0129 12:10:52.840925 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Jan 29 12:10:54 crc kubenswrapper[4753]: I0129 12:10:52.851974 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Jan 29 12:10:54 crc kubenswrapper[4753]: I0129 12:10:52.861084 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Jan 29 12:10:54 crc kubenswrapper[4753]: I0129 12:10:53.022552 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Jan 29 12:10:54 crc kubenswrapper[4753]: I0129 12:10:53.022860 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Jan 29 12:10:54 crc kubenswrapper[4753]: I0129 12:10:53.082077 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Jan 29 12:10:54 crc kubenswrapper[4753]: I0129 12:10:53.083898 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Jan 29 12:10:54 crc kubenswrapper[4753]: I0129 12:10:53.103317 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Jan 29 12:10:54 crc kubenswrapper[4753]: I0129 12:10:53.373248 4753 reflector.go:368] Caches populated for 
*v1.ConfigMap from object-"openshift-authentication"/"audit" Jan 29 12:10:54 crc kubenswrapper[4753]: I0129 12:10:53.373680 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Jan 29 12:10:54 crc kubenswrapper[4753]: I0129 12:10:53.461105 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 29 12:10:54 crc kubenswrapper[4753]: I0129 12:10:53.461696 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Jan 29 12:10:54 crc kubenswrapper[4753]: I0129 12:10:53.461783 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Jan 29 12:10:54 crc kubenswrapper[4753]: I0129 12:10:53.528012 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Jan 29 12:10:54 crc kubenswrapper[4753]: I0129 12:10:53.780098 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Jan 29 12:10:54 crc kubenswrapper[4753]: I0129 12:10:53.780443 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 29 12:10:54 crc kubenswrapper[4753]: I0129 12:10:53.780578 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Jan 29 12:10:54 crc kubenswrapper[4753]: I0129 12:10:53.793379 4753 generic.go:334] "Generic (PLEG): container finished" podID="225f75d2-06ff-4a8e-ad48-5fb73aba9a5f" containerID="5ea372da213825ce784805ec1157049fbc3a85f84ec64a4a69d94a53be8e9f93" exitCode=0 Jan 29 12:10:54 crc kubenswrapper[4753]: I0129 12:10:53.799688 4753 generic.go:334] "Generic (PLEG): container finished" podID="ae52688b-6f7a-441f-927b-ab547b7ce44f" containerID="5432dfc5e349fdc2fce3a60c429c9129f30518e3f8c21a676dc46b7d8cc1879c" exitCode=0 Jan 29 12:10:54 crc kubenswrapper[4753]: I0129 12:10:53.820408 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Jan 29 12:10:54 crc kubenswrapper[4753]: I0129 12:10:53.823619 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Jan 29 12:10:54 crc kubenswrapper[4753]: I0129 12:10:53.824127 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Jan 29 12:10:54 crc kubenswrapper[4753]: I0129 12:10:53.824297 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Jan 29 12:10:54 crc kubenswrapper[4753]: I0129 12:10:53.824425 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Jan 29 12:10:54 crc kubenswrapper[4753]: I0129 12:10:53.824529 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 29 12:10:54 crc kubenswrapper[4753]: I0129 12:10:53.826176 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Jan 29 12:10:54 crc kubenswrapper[4753]: I0129 12:10:53.826393 4753 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Jan 29 12:10:54 crc kubenswrapper[4753]: I0129 12:10:53.831810 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Jan 29 12:10:54 crc kubenswrapper[4753]: I0129 12:10:53.834550 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Jan 29 12:10:54 crc kubenswrapper[4753]: I0129 12:10:53.842425 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Jan 29 12:10:54 crc kubenswrapper[4753]: I0129 12:10:53.963639 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Jan 29 12:10:54 crc kubenswrapper[4753]: I0129 12:10:53.963840 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Jan 29 12:10:54 crc kubenswrapper[4753]: E0129 12:10:54.136855 4753 kubelet.go:2526] "Housekeeping took longer than expected" err="housekeeping took too long" expected="1s" actual="1.524s" Jan 29 12:10:54 crc kubenswrapper[4753]: I0129 12:10:54.192544 4753 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-console/downloads-7954f5f757-b2rb9" Jan 29 12:10:54 crc kubenswrapper[4753]: I0129 12:10:54.192962 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5clxb" event={"ID":"574faad5-0a82-4ff3-b0a8-5390bfd3dc27","Type":"ContainerDied","Data":"709fb49800c42dec000456fee3a98f0284de9438dd16d8dc91bfa0915053f2bc"} Jan 29 12:10:54 crc kubenswrapper[4753]: I0129 12:10:54.193007 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pcxth" event={"ID":"225f75d2-06ff-4a8e-ad48-5fb73aba9a5f","Type":"ContainerDied","Data":"5ea372da213825ce784805ec1157049fbc3a85f84ec64a4a69d94a53be8e9f93"} Jan 29 12:10:54 crc kubenswrapper[4753]: I0129 12:10:54.193036 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bsxpd" event={"ID":"ae52688b-6f7a-441f-927b-ab547b7ce44f","Type":"ContainerDied","Data":"5432dfc5e349fdc2fce3a60c429c9129f30518e3f8c21a676dc46b7d8cc1879c"} Jan 29 12:10:54 crc kubenswrapper[4753]: I0129 12:10:54.202052 4753 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="download-server" containerStatusID={"Type":"cri-o","ID":"c43805542a3469d34d53377647698e9d5207db5b34b4bad078b28cea7c645389"} pod="openshift-console/downloads-7954f5f757-b2rb9" containerMessage="Container download-server failed liveness probe, will be restarted" Jan 29 12:10:54 crc kubenswrapper[4753]: I0129 12:10:54.202115 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/downloads-7954f5f757-b2rb9" podUID="200ffbfe-dff4-45e2-944d-34a3ad56f018" containerName="download-server" containerID="cri-o://c43805542a3469d34d53377647698e9d5207db5b34b4bad078b28cea7c645389" gracePeriod=2 Jan 29 12:10:54 crc kubenswrapper[4753]: I0129 12:10:54.204139 4753 patch_prober.go:28] interesting pod/downloads-7954f5f757-b2rb9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" start-of-body= Jan 29 12:10:54 crc kubenswrapper[4753]: I0129 12:10:54.204182 4753 prober.go:107] "Probe 
failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-b2rb9" podUID="200ffbfe-dff4-45e2-944d-34a3ad56f018" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" Jan 29 12:10:54 crc kubenswrapper[4753]: E0129 12:10:54.402325 4753 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod225f75d2_06ff_4a8e_ad48_5fb73aba9a5f.slice/crio-conmon-5ea372da213825ce784805ec1157049fbc3a85f84ec64a4a69d94a53be8e9f93.scope\": RecentStats: unable to find data in memory cache]" Jan 29 12:10:54 crc kubenswrapper[4753]: I0129 12:10:54.794693 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Jan 29 12:10:54 crc kubenswrapper[4753]: I0129 12:10:54.795730 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Jan 29 12:10:54 crc kubenswrapper[4753]: I0129 12:10:54.795937 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Jan 29 12:10:54 crc kubenswrapper[4753]: I0129 12:10:54.796141 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Jan 29 12:10:54 crc kubenswrapper[4753]: I0129 12:10:54.800985 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Jan 29 12:10:54 crc kubenswrapper[4753]: I0129 12:10:54.801441 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Jan 29 12:10:54 crc kubenswrapper[4753]: I0129 12:10:54.801667 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Jan 29 12:10:54 crc kubenswrapper[4753]: I0129 12:10:54.802084 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Jan 29 12:10:54 crc kubenswrapper[4753]: I0129 12:10:54.803245 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Jan 29 12:10:54 crc kubenswrapper[4753]: I0129 12:10:54.806542 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Jan 29 12:10:54 crc kubenswrapper[4753]: I0129 12:10:54.831806 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Jan 29 12:10:54 crc kubenswrapper[4753]: I0129 12:10:54.832096 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Jan 29 12:10:54 crc kubenswrapper[4753]: I0129 12:10:54.832311 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Jan 29 12:10:54 crc kubenswrapper[4753]: I0129 12:10:54.832588 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Jan 29 12:10:54 crc kubenswrapper[4753]: I0129 12:10:54.844571 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Jan 29 12:10:54 
crc kubenswrapper[4753]: I0129 12:10:54.879982 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Jan 29 12:10:54 crc kubenswrapper[4753]: I0129 12:10:54.893641 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Jan 29 12:10:54 crc kubenswrapper[4753]: I0129 12:10:54.899597 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Jan 29 12:10:54 crc kubenswrapper[4753]: I0129 12:10:54.959177 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Jan 29 12:10:55 crc kubenswrapper[4753]: I0129 12:10:55.019925 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Jan 29 12:10:55 crc kubenswrapper[4753]: I0129 12:10:55.337312 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Jan 29 12:10:55 crc kubenswrapper[4753]: I0129 12:10:55.337374 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Jan 29 12:10:55 crc kubenswrapper[4753]: I0129 12:10:55.337489 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Jan 29 12:10:55 crc kubenswrapper[4753]: I0129 12:10:55.337315 4753 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Jan 29 12:10:55 crc kubenswrapper[4753]: I0129 12:10:55.343734 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Jan 29 12:10:55 crc kubenswrapper[4753]: I0129 12:10:55.358634 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-b52ds" podUID="d818fc13-9863-4172-a818-4e01af393842" containerName="registry-server" probeResult="failure" output=< Jan 29 12:10:55 crc kubenswrapper[4753]: timeout: failed to connect service ":50051" within 1s Jan 29 12:10:55 crc kubenswrapper[4753]: > Jan 29 12:10:55 crc kubenswrapper[4753]: I0129 12:10:55.368887 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Jan 29 12:10:55 crc kubenswrapper[4753]: I0129 12:10:55.525366 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Jan 29 12:10:55 crc kubenswrapper[4753]: I0129 12:10:55.525366 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 29 12:10:55 crc kubenswrapper[4753]: I0129 12:10:55.525539 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Jan 29 12:10:55 crc kubenswrapper[4753]: I0129 12:10:55.527817 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Jan 29 12:10:55 crc kubenswrapper[4753]: I0129 12:10:55.642567 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Jan 29 12:10:55 crc kubenswrapper[4753]: I0129 12:10:55.642927 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Jan 29 12:10:55 crc kubenswrapper[4753]: I0129 
12:10:55.649946 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Jan 29 12:10:55 crc kubenswrapper[4753]: I0129 12:10:55.656448 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Jan 29 12:10:55 crc kubenswrapper[4753]: I0129 12:10:55.932136 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Jan 29 12:10:56 crc kubenswrapper[4753]: I0129 12:10:55.951529 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Jan 29 12:10:56 crc kubenswrapper[4753]: I0129 12:10:55.951965 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Jan 29 12:10:56 crc kubenswrapper[4753]: I0129 12:10:56.202773 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Jan 29 12:10:56 crc kubenswrapper[4753]: I0129 12:10:56.214580 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Jan 29 12:10:56 crc kubenswrapper[4753]: I0129 12:10:56.214844 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Jan 29 12:10:56 crc kubenswrapper[4753]: I0129 12:10:56.215734 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 29 12:10:56 crc kubenswrapper[4753]: I0129 12:10:56.215989 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Jan 29 12:10:56 crc kubenswrapper[4753]: I0129 12:10:56.231472 4753 generic.go:334] "Generic (PLEG): container finished" podID="200ffbfe-dff4-45e2-944d-34a3ad56f018" containerID="c43805542a3469d34d53377647698e9d5207db5b34b4bad078b28cea7c645389" exitCode=0 Jan 29 12:10:56 crc kubenswrapper[4753]: I0129 12:10:56.231524 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-b2rb9" event={"ID":"200ffbfe-dff4-45e2-944d-34a3ad56f018","Type":"ContainerDied","Data":"c43805542a3469d34d53377647698e9d5207db5b34b4bad078b28cea7c645389"} Jan 29 12:10:56 crc kubenswrapper[4753]: I0129 12:10:56.231562 4753 scope.go:117] "RemoveContainer" containerID="ef0121188c254337af0c007914b35d5b2619129cddc2287bb20ea16c8b65b6a3" Jan 29 12:10:56 crc kubenswrapper[4753]: I0129 12:10:56.407787 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 29 12:10:56 crc kubenswrapper[4753]: I0129 12:10:56.490977 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Jan 29 12:10:56 crc kubenswrapper[4753]: I0129 12:10:56.507933 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Jan 29 12:10:56 crc kubenswrapper[4753]: I0129 12:10:56.527031 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Jan 29 12:10:56 crc kubenswrapper[4753]: I0129 12:10:56.628958 4753 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Jan 29 12:10:56 crc kubenswrapper[4753]: I0129 12:10:56.705603 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Jan 29 12:10:56 crc kubenswrapper[4753]: I0129 12:10:56.753095 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Jan 29 12:10:56 crc kubenswrapper[4753]: I0129 12:10:56.804810 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Jan 29 12:10:56 crc kubenswrapper[4753]: I0129 12:10:56.873843 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Jan 29 12:10:56 crc kubenswrapper[4753]: I0129 12:10:56.877523 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-7dkr6" podUID="318aa5db-6b19-4efe-8c5d-00fbb4a84b13" containerName="registry-server" probeResult="failure" output=< Jan 29 12:10:56 crc kubenswrapper[4753]: timeout: failed to connect service ":50051" within 1s Jan 29 12:10:56 crc kubenswrapper[4753]: > Jan 29 12:10:57 crc kubenswrapper[4753]: I0129 12:10:57.170896 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Jan 29 12:10:57 crc kubenswrapper[4753]: I0129 12:10:57.322417 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t89jd" event={"ID":"ce6846a0-6c85-4ae9-afae-b10ead46d21d","Type":"ContainerStarted","Data":"82b49ac4e60915453bc7c1f7c8e1ba68a6c643163b8f03b8eca0416b5c29ea2c"} Jan 29 12:10:57 crc kubenswrapper[4753]: I0129 12:10:57.331165 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-b2rb9" event={"ID":"200ffbfe-dff4-45e2-944d-34a3ad56f018","Type":"ContainerStarted","Data":"c856dc5681a48fd95b0bffeaab8c686fe562f4191c63b0c20a1beddedf54fd24"} Jan 29 12:10:57 crc kubenswrapper[4753]: I0129 12:10:57.331596 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-b2rb9" Jan 29 12:10:57 crc kubenswrapper[4753]: I0129 12:10:57.332774 4753 patch_prober.go:28] interesting pod/downloads-7954f5f757-b2rb9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" start-of-body= Jan 29 12:10:57 crc kubenswrapper[4753]: I0129 12:10:57.332876 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-b2rb9" podUID="200ffbfe-dff4-45e2-944d-34a3ad56f018" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" Jan 29 12:10:57 crc kubenswrapper[4753]: I0129 12:10:57.340569 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Jan 29 12:10:57 crc kubenswrapper[4753]: I0129 12:10:57.345037 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Jan 29 12:10:57 crc kubenswrapper[4753]: I0129 12:10:57.345119 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Jan 29 12:10:57 crc kubenswrapper[4753]: I0129 
12:10:57.345152 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Jan 29 12:10:57 crc kubenswrapper[4753]: I0129 12:10:57.394063 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Jan 29 12:10:57 crc kubenswrapper[4753]: I0129 12:10:57.404047 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Jan 29 12:10:57 crc kubenswrapper[4753]: I0129 12:10:57.581565 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Jan 29 12:10:57 crc kubenswrapper[4753]: I0129 12:10:57.889683 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Jan 29 12:10:57 crc kubenswrapper[4753]: I0129 12:10:57.891434 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Jan 29 12:10:58 crc kubenswrapper[4753]: I0129 12:10:58.109888 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Jan 29 12:10:58 crc kubenswrapper[4753]: I0129 12:10:58.235597 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Jan 29 12:10:58 crc kubenswrapper[4753]: I0129 12:10:58.238681 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Jan 29 12:10:58 crc kubenswrapper[4753]: I0129 12:10:58.257643 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Jan 29 12:10:58 crc kubenswrapper[4753]: I0129 12:10:58.272608 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Jan 29 12:10:58 crc kubenswrapper[4753]: I0129 12:10:58.341553 4753 generic.go:334] "Generic (PLEG): container finished" podID="d0809beb-ae87-4bf7-aa2d-20dbe819c3cc" containerID="e7d2dbf70e86b9f470e3feb4e7b019ea755da1a818acc3e33e140b506b174c6e" exitCode=0 Jan 29 12:10:58 crc kubenswrapper[4753]: I0129 12:10:58.341650 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-z6xk4" event={"ID":"d0809beb-ae87-4bf7-aa2d-20dbe819c3cc","Type":"ContainerDied","Data":"e7d2dbf70e86b9f470e3feb4e7b019ea755da1a818acc3e33e140b506b174c6e"} Jan 29 12:10:58 crc kubenswrapper[4753]: I0129 12:10:58.344697 4753 generic.go:334] "Generic (PLEG): container finished" podID="13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6" containerID="94ba13ac0c6370a35cda4438762c0ee8998bc9580dfd603301d8a0ac86f5f218" exitCode=0 Jan 29 12:10:58 crc kubenswrapper[4753]: I0129 12:10:58.344767 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dgs2s" event={"ID":"13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6","Type":"ContainerDied","Data":"94ba13ac0c6370a35cda4438762c0ee8998bc9580dfd603301d8a0ac86f5f218"} Jan 29 12:10:58 crc kubenswrapper[4753]: I0129 12:10:58.345485 4753 patch_prober.go:28] interesting pod/downloads-7954f5f757-b2rb9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" start-of-body= Jan 29 12:10:58 crc kubenswrapper[4753]: I0129 12:10:58.345549 4753 
prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-b2rb9" podUID="200ffbfe-dff4-45e2-944d-34a3ad56f018" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" Jan 29 12:10:58 crc kubenswrapper[4753]: I0129 12:10:58.408131 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Jan 29 12:10:58 crc kubenswrapper[4753]: I0129 12:10:58.506589 4753 patch_prober.go:28] interesting pod/downloads-7954f5f757-b2rb9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" start-of-body= Jan 29 12:10:58 crc kubenswrapper[4753]: I0129 12:10:58.506676 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-b2rb9" podUID="200ffbfe-dff4-45e2-944d-34a3ad56f018" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" Jan 29 12:10:58 crc kubenswrapper[4753]: I0129 12:10:58.507139 4753 patch_prober.go:28] interesting pod/downloads-7954f5f757-b2rb9 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" start-of-body= Jan 29 12:10:58 crc kubenswrapper[4753]: I0129 12:10:58.507380 4753 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-b2rb9" podUID="200ffbfe-dff4-45e2-944d-34a3ad56f018" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" Jan 29 12:10:58 crc kubenswrapper[4753]: I0129 12:10:58.563027 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Jan 29 12:10:58 crc kubenswrapper[4753]: I0129 12:10:58.746777 4753 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Jan 29 12:10:58 crc kubenswrapper[4753]: I0129 12:10:58.752231 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podStartSLOduration=49.752174486 podStartE2EDuration="49.752174486s" podCreationTimestamp="2026-01-29 12:10:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:10:33.506147581 +0000 UTC m=+247.758229046" watchObservedRunningTime="2026-01-29 12:10:58.752174486 +0000 UTC m=+273.004255941" Jan 29 12:10:58 crc kubenswrapper[4753]: I0129 12:10:58.753322 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-b52ds" podStartSLOduration=32.906058441 podStartE2EDuration="2m18.75331447s" podCreationTimestamp="2026-01-29 12:08:40 +0000 UTC" firstStartedPulling="2026-01-29 12:08:51.044959585 +0000 UTC m=+145.297041040" lastFinishedPulling="2026-01-29 12:10:36.892215614 +0000 UTC m=+251.144297069" observedRunningTime="2026-01-29 12:10:38.710850805 +0000 UTC m=+252.962932270" watchObservedRunningTime="2026-01-29 12:10:58.75331447 +0000 UTC m=+273.005395935" Jan 29 12:10:58 crc kubenswrapper[4753]: I0129 12:10:58.754446 
4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-7dkr6" podStartSLOduration=36.165685247 podStartE2EDuration="2m18.754439653s" podCreationTimestamp="2026-01-29 12:08:40 +0000 UTC" firstStartedPulling="2026-01-29 12:08:54.282314613 +0000 UTC m=+148.534396068" lastFinishedPulling="2026-01-29 12:10:36.871069019 +0000 UTC m=+251.123150474" observedRunningTime="2026-01-29 12:10:37.696832629 +0000 UTC m=+251.948914084" watchObservedRunningTime="2026-01-29 12:10:58.754439653 +0000 UTC m=+273.006521108" Jan 29 12:10:58 crc kubenswrapper[4753]: I0129 12:10:58.755171 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 29 12:10:58 crc kubenswrapper[4753]: I0129 12:10:58.755225 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 29 12:10:58 crc kubenswrapper[4753]: I0129 12:10:58.763560 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 12:10:58 crc kubenswrapper[4753]: I0129 12:10:58.764621 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Jan 29 12:10:58 crc kubenswrapper[4753]: I0129 12:10:58.785288 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=25.785259934 podStartE2EDuration="25.785259934s" podCreationTimestamp="2026-01-29 12:10:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:10:58.783055819 +0000 UTC m=+273.035137284" watchObservedRunningTime="2026-01-29 12:10:58.785259934 +0000 UTC m=+273.037341389" Jan 29 12:10:58 crc kubenswrapper[4753]: I0129 12:10:58.787546 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Jan 29 12:10:58 crc kubenswrapper[4753]: I0129 12:10:58.814445 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Jan 29 12:10:59 crc kubenswrapper[4753]: I0129 12:10:59.307099 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Jan 29 12:10:59 crc kubenswrapper[4753]: I0129 12:10:59.355941 4753 generic.go:334] "Generic (PLEG): container finished" podID="ce6846a0-6c85-4ae9-afae-b10ead46d21d" containerID="82b49ac4e60915453bc7c1f7c8e1ba68a6c643163b8f03b8eca0416b5c29ea2c" exitCode=0 Jan 29 12:10:59 crc kubenswrapper[4753]: I0129 12:10:59.356226 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t89jd" event={"ID":"ce6846a0-6c85-4ae9-afae-b10ead46d21d","Type":"ContainerDied","Data":"82b49ac4e60915453bc7c1f7c8e1ba68a6c643163b8f03b8eca0416b5c29ea2c"} Jan 29 12:11:02 crc kubenswrapper[4753]: I0129 12:11:02.060453 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-b52ds" Jan 29 12:11:02 crc kubenswrapper[4753]: I0129 12:11:02.374200 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-b52ds" Jan 29 12:11:02 crc kubenswrapper[4753]: I0129 12:11:02.798130 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/community-operators-bsxpd" event={"ID":"ae52688b-6f7a-441f-927b-ab547b7ce44f","Type":"ContainerStarted","Data":"52f05030c12ab9e8660e14b119e79d18cd261ba1485966914b56eb53990558c5"} Jan 29 12:11:02 crc kubenswrapper[4753]: I0129 12:11:02.818115 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-bsxpd" podStartSLOduration=17.649174097 podStartE2EDuration="2m25.818007925s" podCreationTimestamp="2026-01-29 12:08:37 +0000 UTC" firstStartedPulling="2026-01-29 12:08:51.312555265 +0000 UTC m=+145.564636720" lastFinishedPulling="2026-01-29 12:10:59.481389083 +0000 UTC m=+273.733470548" observedRunningTime="2026-01-29 12:11:02.817259133 +0000 UTC m=+277.069340598" watchObservedRunningTime="2026-01-29 12:11:02.818007925 +0000 UTC m=+277.070089380" Jan 29 12:11:02 crc kubenswrapper[4753]: I0129 12:11:02.907529 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 12:11:02 crc kubenswrapper[4753]: I0129 12:11:02.909681 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 12:11:02 crc kubenswrapper[4753]: I0129 12:11:02.918622 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 12:11:03 crc kubenswrapper[4753]: I0129 12:11:03.807496 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5clxb" event={"ID":"574faad5-0a82-4ff3-b0a8-5390bfd3dc27","Type":"ContainerStarted","Data":"ac2f0ade620830ba2b7f7881563fefad779b1fb0b8490876d6338c021f3e14f5"} Jan 29 12:11:04 crc kubenswrapper[4753]: I0129 12:11:04.005411 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 12:11:04 crc kubenswrapper[4753]: I0129 12:11:04.841667 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-5clxb" podStartSLOduration=15.392559662 podStartE2EDuration="2m26.841643484s" podCreationTimestamp="2026-01-29 12:08:38 +0000 UTC" firstStartedPulling="2026-01-29 12:08:51.21527361 +0000 UTC m=+145.467355065" lastFinishedPulling="2026-01-29 12:11:02.664357422 +0000 UTC m=+276.916438887" observedRunningTime="2026-01-29 12:11:04.839717017 +0000 UTC m=+279.091798482" watchObservedRunningTime="2026-01-29 12:11:04.841643484 +0000 UTC m=+279.093724939" Jan 29 12:11:08 crc kubenswrapper[4753]: I0129 12:11:05.420733 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-7dkr6" Jan 29 12:11:08 crc kubenswrapper[4753]: I0129 12:11:05.545228 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-7dkr6" Jan 29 12:11:08 crc kubenswrapper[4753]: I0129 12:11:08.300705 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pcxth" event={"ID":"225f75d2-06ff-4a8e-ad48-5fb73aba9a5f","Type":"ContainerStarted","Data":"2338eccd74c61d27c2d4f9866a4563469740efcc951b65081239082b950b42a8"} Jan 29 12:11:08 crc kubenswrapper[4753]: I0129 12:11:08.326342 4753 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 29 12:11:08 crc kubenswrapper[4753]: I0129 12:11:08.326943 4753 kuberuntime_container.go:808] "Killing container 
with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://dbb5b279c7f8838a84b1b3b2045df0266ab38c31c624a472794f3dae74d9ddc4" gracePeriod=5 Jan 29 12:11:08 crc kubenswrapper[4753]: I0129 12:11:08.507694 4753 patch_prober.go:28] interesting pod/downloads-7954f5f757-b2rb9 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" start-of-body= Jan 29 12:11:08 crc kubenswrapper[4753]: I0129 12:11:08.508146 4753 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-b2rb9" podUID="200ffbfe-dff4-45e2-944d-34a3ad56f018" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" Jan 29 12:11:08 crc kubenswrapper[4753]: I0129 12:11:08.508333 4753 patch_prober.go:28] interesting pod/downloads-7954f5f757-b2rb9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" start-of-body= Jan 29 12:11:08 crc kubenswrapper[4753]: I0129 12:11:08.508438 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-b2rb9" podUID="200ffbfe-dff4-45e2-944d-34a3ad56f018" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" Jan 29 12:11:09 crc kubenswrapper[4753]: I0129 12:11:09.454070 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-pcxth" Jan 29 12:11:09 crc kubenswrapper[4753]: I0129 12:11:09.454177 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-pcxth" Jan 29 12:11:09 crc kubenswrapper[4753]: I0129 12:11:09.649160 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-bsxpd" Jan 29 12:11:09 crc kubenswrapper[4753]: I0129 12:11:09.649223 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-bsxpd" Jan 29 12:11:09 crc kubenswrapper[4753]: I0129 12:11:09.700060 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-bsxpd" Jan 29 12:11:09 crc kubenswrapper[4753]: I0129 12:11:09.727319 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-pcxth" podStartSLOduration=17.602277372 podStartE2EDuration="2m32.727264146s" podCreationTimestamp="2026-01-29 12:08:37 +0000 UTC" firstStartedPulling="2026-01-29 12:08:51.118927062 +0000 UTC m=+145.371008517" lastFinishedPulling="2026-01-29 12:11:06.243913836 +0000 UTC m=+280.495995291" observedRunningTime="2026-01-29 12:11:09.342134761 +0000 UTC m=+283.594216236" watchObservedRunningTime="2026-01-29 12:11:09.727264146 +0000 UTC m=+283.979345591" Jan 29 12:11:09 crc kubenswrapper[4753]: I0129 12:11:09.787813 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-5clxb" Jan 29 12:11:09 crc kubenswrapper[4753]: I0129 12:11:09.788131 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openshift-marketplace/certified-operators-5clxb" Jan 29 12:11:09 crc kubenswrapper[4753]: I0129 12:11:09.963581 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-5clxb" Jan 29 12:11:10 crc kubenswrapper[4753]: I0129 12:11:10.455585 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-bsxpd" Jan 29 12:11:10 crc kubenswrapper[4753]: I0129 12:11:10.465818 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-5clxb" Jan 29 12:11:10 crc kubenswrapper[4753]: I0129 12:11:10.648552 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-pcxth" podUID="225f75d2-06ff-4a8e-ad48-5fb73aba9a5f" containerName="registry-server" probeResult="failure" output=< Jan 29 12:11:10 crc kubenswrapper[4753]: timeout: failed to connect service ":50051" within 1s Jan 29 12:11:10 crc kubenswrapper[4753]: > Jan 29 12:11:12 crc kubenswrapper[4753]: I0129 12:11:12.123064 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-55788fcfd7-spvgx"] Jan 29 12:11:12 crc kubenswrapper[4753]: I0129 12:11:12.123466 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-55788fcfd7-spvgx" podUID="36e98c0d-195d-45f0-9ecb-c83ed362a56b" containerName="controller-manager" containerID="cri-o://de5712b884dcb2dba2784a29f8a07a21e33ce657aa22ddcef34f8a79439d3703" gracePeriod=30 Jan 29 12:11:12 crc kubenswrapper[4753]: I0129 12:11:12.127990 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7657498d8f-596p2"] Jan 29 12:11:12 crc kubenswrapper[4753]: I0129 12:11:12.133511 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-7657498d8f-596p2" podUID="bbffd8b6-cc70-40c7-a5fa-bb36544deb3b" containerName="route-controller-manager" containerID="cri-o://789f686350a37c412441d5d553653c635a6b80fed4f60fc3433c350500e695c9" gracePeriod=30 Jan 29 12:11:13 crc kubenswrapper[4753]: I0129 12:11:13.471444 4753 generic.go:334] "Generic (PLEG): container finished" podID="36e98c0d-195d-45f0-9ecb-c83ed362a56b" containerID="de5712b884dcb2dba2784a29f8a07a21e33ce657aa22ddcef34f8a79439d3703" exitCode=0 Jan 29 12:11:13 crc kubenswrapper[4753]: I0129 12:11:13.471866 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-55788fcfd7-spvgx" event={"ID":"36e98c0d-195d-45f0-9ecb-c83ed362a56b","Type":"ContainerDied","Data":"de5712b884dcb2dba2784a29f8a07a21e33ce657aa22ddcef34f8a79439d3703"} Jan 29 12:11:14 crc kubenswrapper[4753]: I0129 12:11:14.479491 4753 generic.go:334] "Generic (PLEG): container finished" podID="bbffd8b6-cc70-40c7-a5fa-bb36544deb3b" containerID="789f686350a37c412441d5d553653c635a6b80fed4f60fc3433c350500e695c9" exitCode=0 Jan 29 12:11:14 crc kubenswrapper[4753]: I0129 12:11:14.479539 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7657498d8f-596p2" event={"ID":"bbffd8b6-cc70-40c7-a5fa-bb36544deb3b","Type":"ContainerDied","Data":"789f686350a37c412441d5d553653c635a6b80fed4f60fc3433c350500e695c9"} Jan 29 12:11:15 crc kubenswrapper[4753]: I0129 12:11:15.535991 4753 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Jan 29 12:11:15 crc kubenswrapper[4753]: I0129 12:11:15.536099 4753 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="dbb5b279c7f8838a84b1b3b2045df0266ab38c31c624a472794f3dae74d9ddc4" exitCode=137 Jan 29 12:11:15 crc kubenswrapper[4753]: I0129 12:11:15.859951 4753 patch_prober.go:28] interesting pod/controller-manager-55788fcfd7-spvgx container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.57:8443/healthz\": dial tcp 10.217.0.57:8443: connect: connection refused" start-of-body= Jan 29 12:11:15 crc kubenswrapper[4753]: I0129 12:11:15.860131 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-55788fcfd7-spvgx" podUID="36e98c0d-195d-45f0-9ecb-c83ed362a56b" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.57:8443/healthz\": dial tcp 10.217.0.57:8443: connect: connection refused" Jan 29 12:11:16 crc kubenswrapper[4753]: I0129 12:11:16.224618 4753 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Jan 29 12:11:18 crc kubenswrapper[4753]: I0129 12:11:18.507306 4753 patch_prober.go:28] interesting pod/downloads-7954f5f757-b2rb9 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" start-of-body= Jan 29 12:11:18 crc kubenswrapper[4753]: I0129 12:11:18.507306 4753 patch_prober.go:28] interesting pod/downloads-7954f5f757-b2rb9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" start-of-body= Jan 29 12:11:18 crc kubenswrapper[4753]: I0129 12:11:18.507936 4753 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-b2rb9" podUID="200ffbfe-dff4-45e2-944d-34a3ad56f018" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" Jan 29 12:11:18 crc kubenswrapper[4753]: I0129 12:11:18.508080 4753 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-console/downloads-7954f5f757-b2rb9" Jan 29 12:11:18 crc kubenswrapper[4753]: I0129 12:11:18.507975 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-b2rb9" podUID="200ffbfe-dff4-45e2-944d-34a3ad56f018" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" Jan 29 12:11:18 crc kubenswrapper[4753]: I0129 12:11:18.508793 4753 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="download-server" containerStatusID={"Type":"cri-o","ID":"c856dc5681a48fd95b0bffeaab8c686fe562f4191c63b0c20a1beddedf54fd24"} pod="openshift-console/downloads-7954f5f757-b2rb9" containerMessage="Container download-server failed liveness probe, will be restarted" Jan 29 12:11:18 crc kubenswrapper[4753]: I0129 12:11:18.508836 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/downloads-7954f5f757-b2rb9" 
podUID="200ffbfe-dff4-45e2-944d-34a3ad56f018" containerName="download-server" containerID="cri-o://c856dc5681a48fd95b0bffeaab8c686fe562f4191c63b0c20a1beddedf54fd24" gracePeriod=2 Jan 29 12:11:18 crc kubenswrapper[4753]: I0129 12:11:18.509320 4753 patch_prober.go:28] interesting pod/downloads-7954f5f757-b2rb9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" start-of-body= Jan 29 12:11:18 crc kubenswrapper[4753]: I0129 12:11:18.509346 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-b2rb9" podUID="200ffbfe-dff4-45e2-944d-34a3ad56f018" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" Jan 29 12:11:18 crc kubenswrapper[4753]: I0129 12:11:18.704819 4753 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-lswml container/marketplace-operator namespace/openshift-marketplace: Liveness probe status=failure output="Get \"http://10.217.0.38:8080/healthz\": dial tcp 10.217.0.38:8080: connect: connection refused" start-of-body= Jan 29 12:11:18 crc kubenswrapper[4753]: I0129 12:11:18.704837 4753 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-lswml container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.38:8080/healthz\": dial tcp 10.217.0.38:8080: connect: connection refused" start-of-body= Jan 29 12:11:18 crc kubenswrapper[4753]: I0129 12:11:18.704891 4753 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/marketplace-operator-79b997595-lswml" podUID="dad90295-65db-470c-8041-19fcf86d0439" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.38:8080/healthz\": dial tcp 10.217.0.38:8080: connect: connection refused" Jan 29 12:11:18 crc kubenswrapper[4753]: I0129 12:11:18.704922 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-lswml" podUID="dad90295-65db-470c-8041-19fcf86d0439" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.38:8080/healthz\": dial tcp 10.217.0.38:8080: connect: connection refused" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.283716 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.284135 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.284655 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7657498d8f-596p2" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.292238 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-55788fcfd7-spvgx" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.317879 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-f846b69fd-w82f2"] Jan 29 12:11:19 crc kubenswrapper[4753]: E0129 12:11:19.318471 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.318513 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 29 12:11:19 crc kubenswrapper[4753]: E0129 12:11:19.318553 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9185c9f-17d7-4e5b-84cf-4be6d5233abd" containerName="installer" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.318560 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9185c9f-17d7-4e5b-84cf-4be6d5233abd" containerName="installer" Jan 29 12:11:19 crc kubenswrapper[4753]: E0129 12:11:19.318573 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36e98c0d-195d-45f0-9ecb-c83ed362a56b" containerName="controller-manager" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.318581 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="36e98c0d-195d-45f0-9ecb-c83ed362a56b" containerName="controller-manager" Jan 29 12:11:19 crc kubenswrapper[4753]: E0129 12:11:19.318595 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bbffd8b6-cc70-40c7-a5fa-bb36544deb3b" containerName="route-controller-manager" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.318601 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="bbffd8b6-cc70-40c7-a5fa-bb36544deb3b" containerName="route-controller-manager" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.318836 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="bbffd8b6-cc70-40c7-a5fa-bb36544deb3b" containerName="route-controller-manager" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.318858 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="a9185c9f-17d7-4e5b-84cf-4be6d5233abd" containerName="installer" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.318871 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="36e98c0d-195d-45f0-9ecb-c83ed362a56b" containerName="controller-manager" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.318898 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.320068 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-f846b69fd-w82f2" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.407203 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/bbffd8b6-cc70-40c7-a5fa-bb36544deb3b-client-ca\") pod \"bbffd8b6-cc70-40c7-a5fa-bb36544deb3b\" (UID: \"bbffd8b6-cc70-40c7-a5fa-bb36544deb3b\") " Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.407308 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/36e98c0d-195d-45f0-9ecb-c83ed362a56b-client-ca\") pod \"36e98c0d-195d-45f0-9ecb-c83ed362a56b\" (UID: \"36e98c0d-195d-45f0-9ecb-c83ed362a56b\") " Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.407366 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/36e98c0d-195d-45f0-9ecb-c83ed362a56b-serving-cert\") pod \"36e98c0d-195d-45f0-9ecb-c83ed362a56b\" (UID: \"36e98c0d-195d-45f0-9ecb-c83ed362a56b\") " Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.407412 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.407503 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p8988\" (UniqueName: \"kubernetes.io/projected/bbffd8b6-cc70-40c7-a5fa-bb36544deb3b-kube-api-access-p8988\") pod \"bbffd8b6-cc70-40c7-a5fa-bb36544deb3b\" (UID: \"bbffd8b6-cc70-40c7-a5fa-bb36544deb3b\") " Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.407632 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.407744 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.408613 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.408493 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.408712 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/36e98c0d-195d-45f0-9ecb-c83ed362a56b-client-ca" (OuterVolumeSpecName: "client-ca") pod "36e98c0d-195d-45f0-9ecb-c83ed362a56b" (UID: "36e98c0d-195d-45f0-9ecb-c83ed362a56b"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.409010 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bbffd8b6-cc70-40c7-a5fa-bb36544deb3b-client-ca" (OuterVolumeSpecName: "client-ca") pod "bbffd8b6-cc70-40c7-a5fa-bb36544deb3b" (UID: "bbffd8b6-cc70-40c7-a5fa-bb36544deb3b"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.409135 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.409192 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bbffd8b6-cc70-40c7-a5fa-bb36544deb3b-config\") pod \"bbffd8b6-cc70-40c7-a5fa-bb36544deb3b\" (UID: \"bbffd8b6-cc70-40c7-a5fa-bb36544deb3b\") " Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.409247 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/36e98c0d-195d-45f0-9ecb-c83ed362a56b-proxy-ca-bundles\") pod \"36e98c0d-195d-45f0-9ecb-c83ed362a56b\" (UID: \"36e98c0d-195d-45f0-9ecb-c83ed362a56b\") " Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.409282 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bbffd8b6-cc70-40c7-a5fa-bb36544deb3b-serving-cert\") pod \"bbffd8b6-cc70-40c7-a5fa-bb36544deb3b\" (UID: \"bbffd8b6-cc70-40c7-a5fa-bb36544deb3b\") " Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.409308 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/36e98c0d-195d-45f0-9ecb-c83ed362a56b-config\") pod \"36e98c0d-195d-45f0-9ecb-c83ed362a56b\" (UID: \"36e98c0d-195d-45f0-9ecb-c83ed362a56b\") " Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.409328 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.409356 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f6w4l\" (UniqueName: \"kubernetes.io/projected/36e98c0d-195d-45f0-9ecb-c83ed362a56b-kube-api-access-f6w4l\") pod \"36e98c0d-195d-45f0-9ecb-c83ed362a56b\" (UID: \"36e98c0d-195d-45f0-9ecb-c83ed362a56b\") " Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.409454 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/0f3756b4-7106-4712-8db0-5a70b0f8c76c-config\") pod \"route-controller-manager-f846b69fd-w82f2\" (UID: \"0f3756b4-7106-4712-8db0-5a70b0f8c76c\") " pod="openshift-route-controller-manager/route-controller-manager-f846b69fd-w82f2" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.409538 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0f3756b4-7106-4712-8db0-5a70b0f8c76c-serving-cert\") pod \"route-controller-manager-f846b69fd-w82f2\" (UID: \"0f3756b4-7106-4712-8db0-5a70b0f8c76c\") " pod="openshift-route-controller-manager/route-controller-manager-f846b69fd-w82f2" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.409604 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dtlx6\" (UniqueName: \"kubernetes.io/projected/0f3756b4-7106-4712-8db0-5a70b0f8c76c-kube-api-access-dtlx6\") pod \"route-controller-manager-f846b69fd-w82f2\" (UID: \"0f3756b4-7106-4712-8db0-5a70b0f8c76c\") " pod="openshift-route-controller-manager/route-controller-manager-f846b69fd-w82f2" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.409679 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0f3756b4-7106-4712-8db0-5a70b0f8c76c-client-ca\") pod \"route-controller-manager-f846b69fd-w82f2\" (UID: \"0f3756b4-7106-4712-8db0-5a70b0f8c76c\") " pod="openshift-route-controller-manager/route-controller-manager-f846b69fd-w82f2" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.409762 4753 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/bbffd8b6-cc70-40c7-a5fa-bb36544deb3b-client-ca\") on node \"crc\" DevicePath \"\"" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.409775 4753 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/36e98c0d-195d-45f0-9ecb-c83ed362a56b-client-ca\") on node \"crc\" DevicePath \"\"" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.409784 4753 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.409793 4753 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\"" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.409864 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.409961 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/36e98c0d-195d-45f0-9ecb-c83ed362a56b-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "36e98c0d-195d-45f0-9ecb-c83ed362a56b" (UID: "36e98c0d-195d-45f0-9ecb-c83ed362a56b"). InnerVolumeSpecName "proxy-ca-bundles". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.409984 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.410062 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bbffd8b6-cc70-40c7-a5fa-bb36544deb3b-config" (OuterVolumeSpecName: "config") pod "bbffd8b6-cc70-40c7-a5fa-bb36544deb3b" (UID: "bbffd8b6-cc70-40c7-a5fa-bb36544deb3b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.410165 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/36e98c0d-195d-45f0-9ecb-c83ed362a56b-config" (OuterVolumeSpecName: "config") pod "36e98c0d-195d-45f0-9ecb-c83ed362a56b" (UID: "36e98c0d-195d-45f0-9ecb-c83ed362a56b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.414663 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bbffd8b6-cc70-40c7-a5fa-bb36544deb3b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bbffd8b6-cc70-40c7-a5fa-bb36544deb3b" (UID: "bbffd8b6-cc70-40c7-a5fa-bb36544deb3b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.414671 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/36e98c0d-195d-45f0-9ecb-c83ed362a56b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "36e98c0d-195d-45f0-9ecb-c83ed362a56b" (UID: "36e98c0d-195d-45f0-9ecb-c83ed362a56b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.417676 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bbffd8b6-cc70-40c7-a5fa-bb36544deb3b-kube-api-access-p8988" (OuterVolumeSpecName: "kube-api-access-p8988") pod "bbffd8b6-cc70-40c7-a5fa-bb36544deb3b" (UID: "bbffd8b6-cc70-40c7-a5fa-bb36544deb3b"). InnerVolumeSpecName "kube-api-access-p8988". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.419732 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/36e98c0d-195d-45f0-9ecb-c83ed362a56b-kube-api-access-f6w4l" (OuterVolumeSpecName: "kube-api-access-f6w4l") pod "36e98c0d-195d-45f0-9ecb-c83ed362a56b" (UID: "36e98c0d-195d-45f0-9ecb-c83ed362a56b"). InnerVolumeSpecName "kube-api-access-f6w4l". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.421149 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.484620 4753 patch_prober.go:28] interesting pod/route-controller-manager-7657498d8f-596p2 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.60:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.484812 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-7657498d8f-596p2" podUID="bbffd8b6-cc70-40c7-a5fa-bb36544deb3b" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.60:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.500765 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-pcxth" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.510201 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0f3756b4-7106-4712-8db0-5a70b0f8c76c-client-ca\") pod \"route-controller-manager-f846b69fd-w82f2\" (UID: \"0f3756b4-7106-4712-8db0-5a70b0f8c76c\") " pod="openshift-route-controller-manager/route-controller-manager-f846b69fd-w82f2" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.510301 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f3756b4-7106-4712-8db0-5a70b0f8c76c-config\") pod \"route-controller-manager-f846b69fd-w82f2\" (UID: \"0f3756b4-7106-4712-8db0-5a70b0f8c76c\") " pod="openshift-route-controller-manager/route-controller-manager-f846b69fd-w82f2" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.510410 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0f3756b4-7106-4712-8db0-5a70b0f8c76c-serving-cert\") pod \"route-controller-manager-f846b69fd-w82f2\" (UID: \"0f3756b4-7106-4712-8db0-5a70b0f8c76c\") " pod="openshift-route-controller-manager/route-controller-manager-f846b69fd-w82f2" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.510440 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dtlx6\" (UniqueName: \"kubernetes.io/projected/0f3756b4-7106-4712-8db0-5a70b0f8c76c-kube-api-access-dtlx6\") pod \"route-controller-manager-f846b69fd-w82f2\" (UID: \"0f3756b4-7106-4712-8db0-5a70b0f8c76c\") " pod="openshift-route-controller-manager/route-controller-manager-f846b69fd-w82f2" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.510500 4753 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/36e98c0d-195d-45f0-9ecb-c83ed362a56b-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.510515 4753 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\"" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.510530 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f6w4l\" (UniqueName: 
\"kubernetes.io/projected/36e98c0d-195d-45f0-9ecb-c83ed362a56b-kube-api-access-f6w4l\") on node \"crc\" DevicePath \"\"" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.510551 4753 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/36e98c0d-195d-45f0-9ecb-c83ed362a56b-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.510562 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p8988\" (UniqueName: \"kubernetes.io/projected/bbffd8b6-cc70-40c7-a5fa-bb36544deb3b-kube-api-access-p8988\") on node \"crc\" DevicePath \"\"" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.510573 4753 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.510584 4753 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\"" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.510595 4753 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bbffd8b6-cc70-40c7-a5fa-bb36544deb3b-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.510605 4753 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/36e98c0d-195d-45f0-9ecb-c83ed362a56b-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.510615 4753 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bbffd8b6-cc70-40c7-a5fa-bb36544deb3b-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.511789 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0f3756b4-7106-4712-8db0-5a70b0f8c76c-client-ca\") pod \"route-controller-manager-f846b69fd-w82f2\" (UID: \"0f3756b4-7106-4712-8db0-5a70b0f8c76c\") " pod="openshift-route-controller-manager/route-controller-manager-f846b69fd-w82f2" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.514626 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f3756b4-7106-4712-8db0-5a70b0f8c76c-config\") pod \"route-controller-manager-f846b69fd-w82f2\" (UID: \"0f3756b4-7106-4712-8db0-5a70b0f8c76c\") " pod="openshift-route-controller-manager/route-controller-manager-f846b69fd-w82f2" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.516630 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0f3756b4-7106-4712-8db0-5a70b0f8c76c-serving-cert\") pod \"route-controller-manager-f846b69fd-w82f2\" (UID: \"0f3756b4-7106-4712-8db0-5a70b0f8c76c\") " pod="openshift-route-controller-manager/route-controller-manager-f846b69fd-w82f2" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.539375 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dtlx6\" (UniqueName: \"kubernetes.io/projected/0f3756b4-7106-4712-8db0-5a70b0f8c76c-kube-api-access-dtlx6\") pod \"route-controller-manager-f846b69fd-w82f2\" (UID: 
\"0f3756b4-7106-4712-8db0-5a70b0f8c76c\") " pod="openshift-route-controller-manager/route-controller-manager-f846b69fd-w82f2" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.543966 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-pcxth" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.576832 4753 generic.go:334] "Generic (PLEG): container finished" podID="200ffbfe-dff4-45e2-944d-34a3ad56f018" containerID="c856dc5681a48fd95b0bffeaab8c686fe562f4191c63b0c20a1beddedf54fd24" exitCode=0 Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.576867 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-b2rb9" event={"ID":"200ffbfe-dff4-45e2-944d-34a3ad56f018","Type":"ContainerDied","Data":"c856dc5681a48fd95b0bffeaab8c686fe562f4191c63b0c20a1beddedf54fd24"} Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.577166 4753 scope.go:117] "RemoveContainer" containerID="c43805542a3469d34d53377647698e9d5207db5b34b4bad078b28cea7c645389" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.579093 4753 generic.go:334] "Generic (PLEG): container finished" podID="dad90295-65db-470c-8041-19fcf86d0439" containerID="3edfd10e3371577561cb5294e3223259b5cdccc67802061c5edac4bd5dea3ee0" exitCode=0 Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.579150 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-lswml" event={"ID":"dad90295-65db-470c-8041-19fcf86d0439","Type":"ContainerDied","Data":"3edfd10e3371577561cb5294e3223259b5cdccc67802061c5edac4bd5dea3ee0"} Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.580575 4753 scope.go:117] "RemoveContainer" containerID="3edfd10e3371577561cb5294e3223259b5cdccc67802061c5edac4bd5dea3ee0" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.581278 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-55788fcfd7-spvgx" event={"ID":"36e98c0d-195d-45f0-9ecb-c83ed362a56b","Type":"ContainerDied","Data":"e4fcaa519b12394191fe8305f1b63ba2f8fb9f7ca515fc847452761652b3119f"} Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.581364 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-55788fcfd7-spvgx" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.584408 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.584579 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.588890 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7657498d8f-596p2" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.590303 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7657498d8f-596p2" event={"ID":"bbffd8b6-cc70-40c7-a5fa-bb36544deb3b","Type":"ContainerDied","Data":"e4cf0c4649426f8c624c6b6f21578fb67b1411974c8e0521f22c3f33aba2c114"} Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.646373 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-f846b69fd-w82f2" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.647601 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-55788fcfd7-spvgx"] Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.656741 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-55788fcfd7-spvgx"] Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.661515 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7657498d8f-596p2"] Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.666210 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7657498d8f-596p2"] Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.897064 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="36e98c0d-195d-45f0-9ecb-c83ed362a56b" path="/var/lib/kubelet/pods/36e98c0d-195d-45f0-9ecb-c83ed362a56b/volumes" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.898003 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bbffd8b6-cc70-40c7-a5fa-bb36544deb3b" path="/var/lib/kubelet/pods/bbffd8b6-cc70-40c7-a5fa-bb36544deb3b/volumes" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.898574 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.898988 4753 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.911747 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.912125 4753 kubelet.go:2649] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="a3855492-3ed8-45c8-b134-c74ba180be39" Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.916486 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 29 12:11:19 crc kubenswrapper[4753]: I0129 12:11:19.916776 4753 kubelet.go:2673] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="a3855492-3ed8-45c8-b134-c74ba180be39" Jan 29 12:11:20 crc kubenswrapper[4753]: I0129 12:11:20.012672 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Jan 29 12:11:21 crc kubenswrapper[4753]: I0129 12:11:21.517728 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-58f96f7974-5vgpz"] Jan 29 12:11:21 crc kubenswrapper[4753]: I0129 12:11:21.518809 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-58f96f7974-5vgpz" Jan 29 12:11:21 crc kubenswrapper[4753]: I0129 12:11:21.521207 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 29 12:11:21 crc kubenswrapper[4753]: I0129 12:11:21.521890 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 29 12:11:21 crc kubenswrapper[4753]: I0129 12:11:21.522072 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 29 12:11:21 crc kubenswrapper[4753]: I0129 12:11:21.524112 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 29 12:11:21 crc kubenswrapper[4753]: I0129 12:11:21.528127 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 29 12:11:21 crc kubenswrapper[4753]: I0129 12:11:21.530847 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 29 12:11:21 crc kubenswrapper[4753]: I0129 12:11:21.531741 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 29 12:11:21 crc kubenswrapper[4753]: I0129 12:11:21.629751 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1-proxy-ca-bundles\") pod \"controller-manager-58f96f7974-5vgpz\" (UID: \"aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1\") " pod="openshift-controller-manager/controller-manager-58f96f7974-5vgpz" Jan 29 12:11:21 crc kubenswrapper[4753]: I0129 12:11:21.629841 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sbp76\" (UniqueName: \"kubernetes.io/projected/aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1-kube-api-access-sbp76\") pod \"controller-manager-58f96f7974-5vgpz\" (UID: \"aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1\") " pod="openshift-controller-manager/controller-manager-58f96f7974-5vgpz" Jan 29 12:11:21 crc kubenswrapper[4753]: I0129 12:11:21.629957 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1-serving-cert\") pod \"controller-manager-58f96f7974-5vgpz\" (UID: \"aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1\") " pod="openshift-controller-manager/controller-manager-58f96f7974-5vgpz" Jan 29 12:11:21 crc kubenswrapper[4753]: I0129 12:11:21.629992 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1-config\") pod \"controller-manager-58f96f7974-5vgpz\" (UID: \"aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1\") " pod="openshift-controller-manager/controller-manager-58f96f7974-5vgpz" Jan 29 12:11:21 crc kubenswrapper[4753]: I0129 12:11:21.630059 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1-client-ca\") pod \"controller-manager-58f96f7974-5vgpz\" (UID: \"aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1\") " 
pod="openshift-controller-manager/controller-manager-58f96f7974-5vgpz" Jan 29 12:11:21 crc kubenswrapper[4753]: I0129 12:11:21.731803 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1-proxy-ca-bundles\") pod \"controller-manager-58f96f7974-5vgpz\" (UID: \"aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1\") " pod="openshift-controller-manager/controller-manager-58f96f7974-5vgpz" Jan 29 12:11:21 crc kubenswrapper[4753]: I0129 12:11:21.731876 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sbp76\" (UniqueName: \"kubernetes.io/projected/aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1-kube-api-access-sbp76\") pod \"controller-manager-58f96f7974-5vgpz\" (UID: \"aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1\") " pod="openshift-controller-manager/controller-manager-58f96f7974-5vgpz" Jan 29 12:11:21 crc kubenswrapper[4753]: I0129 12:11:21.731951 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1-serving-cert\") pod \"controller-manager-58f96f7974-5vgpz\" (UID: \"aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1\") " pod="openshift-controller-manager/controller-manager-58f96f7974-5vgpz" Jan 29 12:11:21 crc kubenswrapper[4753]: I0129 12:11:21.732006 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1-config\") pod \"controller-manager-58f96f7974-5vgpz\" (UID: \"aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1\") " pod="openshift-controller-manager/controller-manager-58f96f7974-5vgpz" Jan 29 12:11:21 crc kubenswrapper[4753]: I0129 12:11:21.732045 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1-client-ca\") pod \"controller-manager-58f96f7974-5vgpz\" (UID: \"aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1\") " pod="openshift-controller-manager/controller-manager-58f96f7974-5vgpz" Jan 29 12:11:21 crc kubenswrapper[4753]: I0129 12:11:21.733212 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1-client-ca\") pod \"controller-manager-58f96f7974-5vgpz\" (UID: \"aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1\") " pod="openshift-controller-manager/controller-manager-58f96f7974-5vgpz" Jan 29 12:11:21 crc kubenswrapper[4753]: I0129 12:11:21.733729 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1-proxy-ca-bundles\") pod \"controller-manager-58f96f7974-5vgpz\" (UID: \"aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1\") " pod="openshift-controller-manager/controller-manager-58f96f7974-5vgpz" Jan 29 12:11:21 crc kubenswrapper[4753]: I0129 12:11:21.734610 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1-config\") pod \"controller-manager-58f96f7974-5vgpz\" (UID: \"aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1\") " pod="openshift-controller-manager/controller-manager-58f96f7974-5vgpz" Jan 29 12:11:21 crc kubenswrapper[4753]: I0129 12:11:21.741448 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1-serving-cert\") pod \"controller-manager-58f96f7974-5vgpz\" (UID: \"aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1\") " pod="openshift-controller-manager/controller-manager-58f96f7974-5vgpz" Jan 29 12:11:21 crc kubenswrapper[4753]: I0129 12:11:21.750132 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sbp76\" (UniqueName: \"kubernetes.io/projected/aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1-kube-api-access-sbp76\") pod \"controller-manager-58f96f7974-5vgpz\" (UID: \"aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1\") " pod="openshift-controller-manager/controller-manager-58f96f7974-5vgpz" Jan 29 12:11:21 crc kubenswrapper[4753]: I0129 12:11:21.836835 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-58f96f7974-5vgpz" Jan 29 12:11:22 crc kubenswrapper[4753]: I0129 12:11:22.182412 4753 scope.go:117] "RemoveContainer" containerID="de5712b884dcb2dba2784a29f8a07a21e33ce657aa22ddcef34f8a79439d3703" Jan 29 12:11:22 crc kubenswrapper[4753]: I0129 12:11:22.219005 4753 scope.go:117] "RemoveContainer" containerID="dbb5b279c7f8838a84b1b3b2045df0266ab38c31c624a472794f3dae74d9ddc4" Jan 29 12:11:22 crc kubenswrapper[4753]: I0129 12:11:22.309805 4753 scope.go:117] "RemoveContainer" containerID="789f686350a37c412441d5d553653c635a6b80fed4f60fc3433c350500e695c9" Jan 29 12:11:22 crc kubenswrapper[4753]: I0129 12:11:22.389871 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Jan 29 12:11:23 crc kubenswrapper[4753]: I0129 12:11:23.628733 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-z6xk4" event={"ID":"d0809beb-ae87-4bf7-aa2d-20dbe819c3cc","Type":"ContainerStarted","Data":"a3cd9f298e84306e0b3dc8b1b3f5f7d784c3b06714ab49dc7b77251639ad2000"} Jan 29 12:11:24 crc kubenswrapper[4753]: I0129 12:11:24.638089 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t89jd" event={"ID":"ce6846a0-6c85-4ae9-afae-b10ead46d21d","Type":"ContainerStarted","Data":"84bbad5f70d02ec38037634220f255a1473f68ffd747febf707eb34e1ea997e8"} Jan 29 12:11:24 crc kubenswrapper[4753]: I0129 12:11:24.641068 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-b2rb9" event={"ID":"200ffbfe-dff4-45e2-944d-34a3ad56f018","Type":"ContainerStarted","Data":"6da94d13b888855ffd6729a40ea56e2dc9e01f40f92233de69ef17ae56e65714"} Jan 29 12:11:24 crc kubenswrapper[4753]: I0129 12:11:24.641259 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-b2rb9" Jan 29 12:11:24 crc kubenswrapper[4753]: I0129 12:11:24.641801 4753 patch_prober.go:28] interesting pod/downloads-7954f5f757-b2rb9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" start-of-body= Jan 29 12:11:24 crc kubenswrapper[4753]: I0129 12:11:24.641861 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-b2rb9" podUID="200ffbfe-dff4-45e2-944d-34a3ad56f018" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" Jan 29 12:11:24 crc kubenswrapper[4753]: I0129 12:11:24.643073 4753 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-lswml" event={"ID":"dad90295-65db-470c-8041-19fcf86d0439","Type":"ContainerStarted","Data":"771657a3800bbb3d40ea73f6e835cbb5b06aec1354fbf2aecd2eb76a6d3170f2"} Jan 29 12:11:24 crc kubenswrapper[4753]: I0129 12:11:24.643530 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-lswml" Jan 29 12:11:24 crc kubenswrapper[4753]: I0129 12:11:24.645457 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dgs2s" event={"ID":"13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6","Type":"ContainerStarted","Data":"319898d3a1c9621b0384b4edd10b29f9300cc14aea765872df4951cdad8b3e01"} Jan 29 12:11:24 crc kubenswrapper[4753]: I0129 12:11:24.648758 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-lswml" Jan 29 12:11:24 crc kubenswrapper[4753]: I0129 12:11:24.666508 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-t89jd" podStartSLOduration=16.625923589 podStartE2EDuration="2m47.666464716s" podCreationTimestamp="2026-01-29 12:08:37 +0000 UTC" firstStartedPulling="2026-01-29 12:08:51.142112577 +0000 UTC m=+145.394194022" lastFinishedPulling="2026-01-29 12:11:22.182653694 +0000 UTC m=+296.434735149" observedRunningTime="2026-01-29 12:11:24.66589692 +0000 UTC m=+298.917978375" watchObservedRunningTime="2026-01-29 12:11:24.666464716 +0000 UTC m=+298.918546171" Jan 29 12:11:24 crc kubenswrapper[4753]: I0129 12:11:24.685359 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-dgs2s" podStartSLOduration=15.324158859 podStartE2EDuration="2m44.685340204s" podCreationTimestamp="2026-01-29 12:08:40 +0000 UTC" firstStartedPulling="2026-01-29 12:08:52.821711556 +0000 UTC m=+147.073793011" lastFinishedPulling="2026-01-29 12:11:22.182892901 +0000 UTC m=+296.434974356" observedRunningTime="2026-01-29 12:11:24.683306194 +0000 UTC m=+298.935387649" watchObservedRunningTime="2026-01-29 12:11:24.685340204 +0000 UTC m=+298.937421659" Jan 29 12:11:24 crc kubenswrapper[4753]: I0129 12:11:24.707916 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-z6xk4" podStartSLOduration=17.602348034 podStartE2EDuration="2m44.707893871s" podCreationTimestamp="2026-01-29 12:08:40 +0000 UTC" firstStartedPulling="2026-01-29 12:08:52.822120988 +0000 UTC m=+147.074202443" lastFinishedPulling="2026-01-29 12:11:19.927666825 +0000 UTC m=+294.179748280" observedRunningTime="2026-01-29 12:11:24.705014666 +0000 UTC m=+298.957096141" watchObservedRunningTime="2026-01-29 12:11:24.707893871 +0000 UTC m=+298.959975316" Jan 29 12:11:25 crc kubenswrapper[4753]: I0129 12:11:25.313875 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-z6xk4" Jan 29 12:11:25 crc kubenswrapper[4753]: I0129 12:11:25.314091 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-z6xk4" Jan 29 12:11:25 crc kubenswrapper[4753]: I0129 12:11:25.745462 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Jan 29 12:11:25 crc kubenswrapper[4753]: I0129 12:11:25.750714 4753 patch_prober.go:28] interesting pod/downloads-7954f5f757-b2rb9 
container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" start-of-body= Jan 29 12:11:25 crc kubenswrapper[4753]: I0129 12:11:25.750805 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-b2rb9" podUID="200ffbfe-dff4-45e2-944d-34a3ad56f018" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" Jan 29 12:11:26 crc kubenswrapper[4753]: I0129 12:11:26.358541 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-z6xk4" podUID="d0809beb-ae87-4bf7-aa2d-20dbe819c3cc" containerName="registry-server" probeResult="failure" output=< Jan 29 12:11:26 crc kubenswrapper[4753]: timeout: failed to connect service ":50051" within 1s Jan 29 12:11:26 crc kubenswrapper[4753]: > Jan 29 12:11:26 crc kubenswrapper[4753]: I0129 12:11:26.366045 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Jan 29 12:11:27 crc kubenswrapper[4753]: I0129 12:11:27.246785 4753 cert_rotation.go:91] certificate rotation detected, shutting down client connections to start using new credentials Jan 29 12:11:27 crc kubenswrapper[4753]: I0129 12:11:27.283140 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Jan 29 12:11:27 crc kubenswrapper[4753]: I0129 12:11:27.839921 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-58f96f7974-5vgpz"] Jan 29 12:11:27 crc kubenswrapper[4753]: I0129 12:11:27.880423 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-f846b69fd-w82f2"] Jan 29 12:11:28 crc kubenswrapper[4753]: I0129 12:11:28.225275 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-58f96f7974-5vgpz"] Jan 29 12:11:28 crc kubenswrapper[4753]: I0129 12:11:28.248211 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-f846b69fd-w82f2"] Jan 29 12:11:28 crc kubenswrapper[4753]: W0129 12:11:28.256019 4753 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0f3756b4_7106_4712_8db0_5a70b0f8c76c.slice/crio-e66ed72b4c5d03a6d9e057dd5a5d09842a509b44fb2e0ffa3f5934419d80bda0 WatchSource:0}: Error finding container e66ed72b4c5d03a6d9e057dd5a5d09842a509b44fb2e0ffa3f5934419d80bda0: Status 404 returned error can't find the container with id e66ed72b4c5d03a6d9e057dd5a5d09842a509b44fb2e0ffa3f5934419d80bda0 Jan 29 12:11:28 crc kubenswrapper[4753]: I0129 12:11:28.507100 4753 patch_prober.go:28] interesting pod/downloads-7954f5f757-b2rb9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" start-of-body= Jan 29 12:11:28 crc kubenswrapper[4753]: I0129 12:11:28.507165 4753 patch_prober.go:28] interesting pod/downloads-7954f5f757-b2rb9 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" 
start-of-body= Jan 29 12:11:28 crc kubenswrapper[4753]: I0129 12:11:28.507191 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-b2rb9" podUID="200ffbfe-dff4-45e2-944d-34a3ad56f018" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" Jan 29 12:11:28 crc kubenswrapper[4753]: I0129 12:11:28.507265 4753 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-b2rb9" podUID="200ffbfe-dff4-45e2-944d-34a3ad56f018" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.25:8080/\": dial tcp 10.217.0.25:8080: connect: connection refused" Jan 29 12:11:28 crc kubenswrapper[4753]: I0129 12:11:28.799942 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-f846b69fd-w82f2" event={"ID":"0f3756b4-7106-4712-8db0-5a70b0f8c76c","Type":"ContainerStarted","Data":"e66ed72b4c5d03a6d9e057dd5a5d09842a509b44fb2e0ffa3f5934419d80bda0"} Jan 29 12:11:28 crc kubenswrapper[4753]: I0129 12:11:28.801260 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-58f96f7974-5vgpz" event={"ID":"aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1","Type":"ContainerStarted","Data":"50c1223ba173d163035008d505a4efaced39adac844f2279451b6aa04d819cc2"} Jan 29 12:11:29 crc kubenswrapper[4753]: I0129 12:11:29.253657 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-t89jd" Jan 29 12:11:29 crc kubenswrapper[4753]: I0129 12:11:29.253715 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-t89jd" Jan 29 12:11:29 crc kubenswrapper[4753]: I0129 12:11:29.315456 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-t89jd" Jan 29 12:11:29 crc kubenswrapper[4753]: I0129 12:11:29.825576 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-f846b69fd-w82f2" event={"ID":"0f3756b4-7106-4712-8db0-5a70b0f8c76c","Type":"ContainerStarted","Data":"220855aae0fa4e85e673119064897af9818f696e003cce31bccd316112146c0c"} Jan 29 12:11:29 crc kubenswrapper[4753]: I0129 12:11:29.835091 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-58f96f7974-5vgpz" event={"ID":"aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1","Type":"ContainerStarted","Data":"3205f45e2a562b00298e4a89041971b9d747d21afb428dff89c9c08a016d597f"} Jan 29 12:11:29 crc kubenswrapper[4753]: I0129 12:11:29.835441 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-58f96f7974-5vgpz" Jan 29 12:11:29 crc kubenswrapper[4753]: I0129 12:11:29.876444 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-58f96f7974-5vgpz" Jan 29 12:11:29 crc kubenswrapper[4753]: I0129 12:11:29.878464 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-t89jd" Jan 29 12:11:29 crc kubenswrapper[4753]: I0129 12:11:29.907938 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-58f96f7974-5vgpz" podStartSLOduration=17.907918406 
podStartE2EDuration="17.907918406s" podCreationTimestamp="2026-01-29 12:11:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:11:29.877707893 +0000 UTC m=+304.129789348" watchObservedRunningTime="2026-01-29 12:11:29.907918406 +0000 UTC m=+304.159999861" Jan 29 12:11:31 crc kubenswrapper[4753]: I0129 12:11:31.067668 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-f846b69fd-w82f2" podStartSLOduration=19.067523855 podStartE2EDuration="19.067523855s" podCreationTimestamp="2026-01-29 12:11:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:11:31.066991759 +0000 UTC m=+305.319073244" watchObservedRunningTime="2026-01-29 12:11:31.067523855 +0000 UTC m=+305.319605320" Jan 29 12:11:31 crc kubenswrapper[4753]: I0129 12:11:31.702170 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Jan 29 12:11:32 crc kubenswrapper[4753]: I0129 12:11:32.205407 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-58f96f7974-5vgpz"] Jan 29 12:11:32 crc kubenswrapper[4753]: I0129 12:11:32.230869 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-f846b69fd-w82f2"] Jan 29 12:11:32 crc kubenswrapper[4753]: I0129 12:11:32.231666 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-f846b69fd-w82f2" Jan 29 12:11:32 crc kubenswrapper[4753]: I0129 12:11:32.232585 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-f846b69fd-w82f2" podUID="0f3756b4-7106-4712-8db0-5a70b0f8c76c" containerName="route-controller-manager" containerID="cri-o://220855aae0fa4e85e673119064897af9818f696e003cce31bccd316112146c0c" gracePeriod=30 Jan 29 12:11:32 crc kubenswrapper[4753]: I0129 12:11:32.254486 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-f846b69fd-w82f2" Jan 29 12:11:33 crc kubenswrapper[4753]: I0129 12:11:33.061921 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Jan 29 12:11:33 crc kubenswrapper[4753]: I0129 12:11:33.113047 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-f846b69fd-w82f2" event={"ID":"0f3756b4-7106-4712-8db0-5a70b0f8c76c","Type":"ContainerDied","Data":"220855aae0fa4e85e673119064897af9818f696e003cce31bccd316112146c0c"} Jan 29 12:11:33 crc kubenswrapper[4753]: I0129 12:11:33.113086 4753 generic.go:334] "Generic (PLEG): container finished" podID="0f3756b4-7106-4712-8db0-5a70b0f8c76c" containerID="220855aae0fa4e85e673119064897af9818f696e003cce31bccd316112146c0c" exitCode=0 Jan 29 12:11:33 crc kubenswrapper[4753]: I0129 12:11:33.113533 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-58f96f7974-5vgpz" podUID="aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1" containerName="controller-manager" 
containerID="cri-o://3205f45e2a562b00298e4a89041971b9d747d21afb428dff89c9c08a016d597f" gracePeriod=30 Jan 29 12:11:33 crc kubenswrapper[4753]: I0129 12:11:33.901084 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-f846b69fd-w82f2" Jan 29 12:11:33 crc kubenswrapper[4753]: I0129 12:11:33.936149 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-64cb998744-dj6xp"] Jan 29 12:11:33 crc kubenswrapper[4753]: E0129 12:11:33.936714 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f3756b4-7106-4712-8db0-5a70b0f8c76c" containerName="route-controller-manager" Jan 29 12:11:33 crc kubenswrapper[4753]: I0129 12:11:33.936760 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f3756b4-7106-4712-8db0-5a70b0f8c76c" containerName="route-controller-manager" Jan 29 12:11:33 crc kubenswrapper[4753]: I0129 12:11:33.937035 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="0f3756b4-7106-4712-8db0-5a70b0f8c76c" containerName="route-controller-manager" Jan 29 12:11:33 crc kubenswrapper[4753]: I0129 12:11:33.937670 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-64cb998744-dj6xp" Jan 29 12:11:33 crc kubenswrapper[4753]: I0129 12:11:33.940345 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-64cb998744-dj6xp"] Jan 29 12:11:34 crc kubenswrapper[4753]: I0129 12:11:34.013776 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0f3756b4-7106-4712-8db0-5a70b0f8c76c-serving-cert\") pod \"0f3756b4-7106-4712-8db0-5a70b0f8c76c\" (UID: \"0f3756b4-7106-4712-8db0-5a70b0f8c76c\") " Jan 29 12:11:34 crc kubenswrapper[4753]: I0129 12:11:34.014132 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dtlx6\" (UniqueName: \"kubernetes.io/projected/0f3756b4-7106-4712-8db0-5a70b0f8c76c-kube-api-access-dtlx6\") pod \"0f3756b4-7106-4712-8db0-5a70b0f8c76c\" (UID: \"0f3756b4-7106-4712-8db0-5a70b0f8c76c\") " Jan 29 12:11:34 crc kubenswrapper[4753]: I0129 12:11:34.014243 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0f3756b4-7106-4712-8db0-5a70b0f8c76c-client-ca\") pod \"0f3756b4-7106-4712-8db0-5a70b0f8c76c\" (UID: \"0f3756b4-7106-4712-8db0-5a70b0f8c76c\") " Jan 29 12:11:34 crc kubenswrapper[4753]: I0129 12:11:34.015132 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0f3756b4-7106-4712-8db0-5a70b0f8c76c-client-ca" (OuterVolumeSpecName: "client-ca") pod "0f3756b4-7106-4712-8db0-5a70b0f8c76c" (UID: "0f3756b4-7106-4712-8db0-5a70b0f8c76c"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:11:34 crc kubenswrapper[4753]: I0129 12:11:34.015192 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f3756b4-7106-4712-8db0-5a70b0f8c76c-config\") pod \"0f3756b4-7106-4712-8db0-5a70b0f8c76c\" (UID: \"0f3756b4-7106-4712-8db0-5a70b0f8c76c\") " Jan 29 12:11:34 crc kubenswrapper[4753]: I0129 12:11:34.015243 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0f3756b4-7106-4712-8db0-5a70b0f8c76c-config" (OuterVolumeSpecName: "config") pod "0f3756b4-7106-4712-8db0-5a70b0f8c76c" (UID: "0f3756b4-7106-4712-8db0-5a70b0f8c76c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:11:34 crc kubenswrapper[4753]: I0129 12:11:34.015423 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6554d341-0e97-4e30-9a6d-614e6ad77e40-client-ca\") pod \"route-controller-manager-64cb998744-dj6xp\" (UID: \"6554d341-0e97-4e30-9a6d-614e6ad77e40\") " pod="openshift-route-controller-manager/route-controller-manager-64cb998744-dj6xp" Jan 29 12:11:34 crc kubenswrapper[4753]: I0129 12:11:34.015696 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jvkk7\" (UniqueName: \"kubernetes.io/projected/6554d341-0e97-4e30-9a6d-614e6ad77e40-kube-api-access-jvkk7\") pod \"route-controller-manager-64cb998744-dj6xp\" (UID: \"6554d341-0e97-4e30-9a6d-614e6ad77e40\") " pod="openshift-route-controller-manager/route-controller-manager-64cb998744-dj6xp" Jan 29 12:11:34 crc kubenswrapper[4753]: I0129 12:11:34.015882 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6554d341-0e97-4e30-9a6d-614e6ad77e40-serving-cert\") pod \"route-controller-manager-64cb998744-dj6xp\" (UID: \"6554d341-0e97-4e30-9a6d-614e6ad77e40\") " pod="openshift-route-controller-manager/route-controller-manager-64cb998744-dj6xp" Jan 29 12:11:34 crc kubenswrapper[4753]: I0129 12:11:34.015972 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6554d341-0e97-4e30-9a6d-614e6ad77e40-config\") pod \"route-controller-manager-64cb998744-dj6xp\" (UID: \"6554d341-0e97-4e30-9a6d-614e6ad77e40\") " pod="openshift-route-controller-manager/route-controller-manager-64cb998744-dj6xp" Jan 29 12:11:34 crc kubenswrapper[4753]: I0129 12:11:34.016087 4753 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0f3756b4-7106-4712-8db0-5a70b0f8c76c-client-ca\") on node \"crc\" DevicePath \"\"" Jan 29 12:11:34 crc kubenswrapper[4753]: I0129 12:11:34.016104 4753 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f3756b4-7106-4712-8db0-5a70b0f8c76c-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:11:34 crc kubenswrapper[4753]: I0129 12:11:34.021162 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0f3756b4-7106-4712-8db0-5a70b0f8c76c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0f3756b4-7106-4712-8db0-5a70b0f8c76c" (UID: "0f3756b4-7106-4712-8db0-5a70b0f8c76c"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:11:34 crc kubenswrapper[4753]: I0129 12:11:34.021371 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0f3756b4-7106-4712-8db0-5a70b0f8c76c-kube-api-access-dtlx6" (OuterVolumeSpecName: "kube-api-access-dtlx6") pod "0f3756b4-7106-4712-8db0-5a70b0f8c76c" (UID: "0f3756b4-7106-4712-8db0-5a70b0f8c76c"). InnerVolumeSpecName "kube-api-access-dtlx6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:11:34 crc kubenswrapper[4753]: I0129 12:11:34.117838 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6554d341-0e97-4e30-9a6d-614e6ad77e40-config\") pod \"route-controller-manager-64cb998744-dj6xp\" (UID: \"6554d341-0e97-4e30-9a6d-614e6ad77e40\") " pod="openshift-route-controller-manager/route-controller-manager-64cb998744-dj6xp" Jan 29 12:11:34 crc kubenswrapper[4753]: I0129 12:11:34.117929 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6554d341-0e97-4e30-9a6d-614e6ad77e40-client-ca\") pod \"route-controller-manager-64cb998744-dj6xp\" (UID: \"6554d341-0e97-4e30-9a6d-614e6ad77e40\") " pod="openshift-route-controller-manager/route-controller-manager-64cb998744-dj6xp" Jan 29 12:11:34 crc kubenswrapper[4753]: I0129 12:11:34.117974 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jvkk7\" (UniqueName: \"kubernetes.io/projected/6554d341-0e97-4e30-9a6d-614e6ad77e40-kube-api-access-jvkk7\") pod \"route-controller-manager-64cb998744-dj6xp\" (UID: \"6554d341-0e97-4e30-9a6d-614e6ad77e40\") " pod="openshift-route-controller-manager/route-controller-manager-64cb998744-dj6xp" Jan 29 12:11:34 crc kubenswrapper[4753]: I0129 12:11:34.118036 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6554d341-0e97-4e30-9a6d-614e6ad77e40-serving-cert\") pod \"route-controller-manager-64cb998744-dj6xp\" (UID: \"6554d341-0e97-4e30-9a6d-614e6ad77e40\") " pod="openshift-route-controller-manager/route-controller-manager-64cb998744-dj6xp" Jan 29 12:11:34 crc kubenswrapper[4753]: I0129 12:11:34.118083 4753 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0f3756b4-7106-4712-8db0-5a70b0f8c76c-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 12:11:34 crc kubenswrapper[4753]: I0129 12:11:34.118097 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dtlx6\" (UniqueName: \"kubernetes.io/projected/0f3756b4-7106-4712-8db0-5a70b0f8c76c-kube-api-access-dtlx6\") on node \"crc\" DevicePath \"\"" Jan 29 12:11:34 crc kubenswrapper[4753]: I0129 12:11:34.119827 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6554d341-0e97-4e30-9a6d-614e6ad77e40-client-ca\") pod \"route-controller-manager-64cb998744-dj6xp\" (UID: \"6554d341-0e97-4e30-9a6d-614e6ad77e40\") " pod="openshift-route-controller-manager/route-controller-manager-64cb998744-dj6xp" Jan 29 12:11:34 crc kubenswrapper[4753]: I0129 12:11:34.120716 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6554d341-0e97-4e30-9a6d-614e6ad77e40-config\") pod \"route-controller-manager-64cb998744-dj6xp\" (UID: \"6554d341-0e97-4e30-9a6d-614e6ad77e40\") " 
pod="openshift-route-controller-manager/route-controller-manager-64cb998744-dj6xp" Jan 29 12:11:34 crc kubenswrapper[4753]: I0129 12:11:34.122610 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6554d341-0e97-4e30-9a6d-614e6ad77e40-serving-cert\") pod \"route-controller-manager-64cb998744-dj6xp\" (UID: \"6554d341-0e97-4e30-9a6d-614e6ad77e40\") " pod="openshift-route-controller-manager/route-controller-manager-64cb998744-dj6xp" Jan 29 12:11:34 crc kubenswrapper[4753]: I0129 12:11:34.128091 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-f846b69fd-w82f2" Jan 29 12:11:34 crc kubenswrapper[4753]: I0129 12:11:34.128086 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-f846b69fd-w82f2" event={"ID":"0f3756b4-7106-4712-8db0-5a70b0f8c76c","Type":"ContainerDied","Data":"e66ed72b4c5d03a6d9e057dd5a5d09842a509b44fb2e0ffa3f5934419d80bda0"} Jan 29 12:11:34 crc kubenswrapper[4753]: I0129 12:11:34.128280 4753 scope.go:117] "RemoveContainer" containerID="220855aae0fa4e85e673119064897af9818f696e003cce31bccd316112146c0c" Jan 29 12:11:34 crc kubenswrapper[4753]: I0129 12:11:34.133391 4753 generic.go:334] "Generic (PLEG): container finished" podID="aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1" containerID="3205f45e2a562b00298e4a89041971b9d747d21afb428dff89c9c08a016d597f" exitCode=0 Jan 29 12:11:34 crc kubenswrapper[4753]: I0129 12:11:34.133452 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-58f96f7974-5vgpz" event={"ID":"aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1","Type":"ContainerDied","Data":"3205f45e2a562b00298e4a89041971b9d747d21afb428dff89c9c08a016d597f"} Jan 29 12:11:34 crc kubenswrapper[4753]: I0129 12:11:34.138059 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jvkk7\" (UniqueName: \"kubernetes.io/projected/6554d341-0e97-4e30-9a6d-614e6ad77e40-kube-api-access-jvkk7\") pod \"route-controller-manager-64cb998744-dj6xp\" (UID: \"6554d341-0e97-4e30-9a6d-614e6ad77e40\") " pod="openshift-route-controller-manager/route-controller-manager-64cb998744-dj6xp" Jan 29 12:11:34 crc kubenswrapper[4753]: I0129 12:11:34.188759 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-f846b69fd-w82f2"] Jan 29 12:11:34 crc kubenswrapper[4753]: I0129 12:11:34.194261 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-f846b69fd-w82f2"] Jan 29 12:11:34 crc kubenswrapper[4753]: I0129 12:11:34.263275 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-64cb998744-dj6xp" Jan 29 12:11:34 crc kubenswrapper[4753]: I0129 12:11:34.416463 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-58f96f7974-5vgpz" Jan 29 12:11:34 crc kubenswrapper[4753]: I0129 12:11:34.524117 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1-serving-cert\") pod \"aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1\" (UID: \"aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1\") " Jan 29 12:11:34 crc kubenswrapper[4753]: I0129 12:11:34.524311 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1-proxy-ca-bundles\") pod \"aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1\" (UID: \"aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1\") " Jan 29 12:11:34 crc kubenswrapper[4753]: I0129 12:11:34.524366 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1-client-ca\") pod \"aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1\" (UID: \"aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1\") " Jan 29 12:11:34 crc kubenswrapper[4753]: I0129 12:11:34.524400 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sbp76\" (UniqueName: \"kubernetes.io/projected/aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1-kube-api-access-sbp76\") pod \"aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1\" (UID: \"aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1\") " Jan 29 12:11:34 crc kubenswrapper[4753]: I0129 12:11:34.524431 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1-config\") pod \"aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1\" (UID: \"aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1\") " Jan 29 12:11:34 crc kubenswrapper[4753]: I0129 12:11:34.525134 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1" (UID: "aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:11:34 crc kubenswrapper[4753]: I0129 12:11:34.525207 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1-client-ca" (OuterVolumeSpecName: "client-ca") pod "aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1" (UID: "aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:11:34 crc kubenswrapper[4753]: I0129 12:11:34.525403 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1-config" (OuterVolumeSpecName: "config") pod "aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1" (UID: "aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:11:34 crc kubenswrapper[4753]: I0129 12:11:34.530392 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-dgs2s" Jan 29 12:11:34 crc kubenswrapper[4753]: I0129 12:11:34.530437 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-dgs2s" Jan 29 12:11:34 crc kubenswrapper[4753]: I0129 12:11:34.606259 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1-kube-api-access-sbp76" (OuterVolumeSpecName: "kube-api-access-sbp76") pod "aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1" (UID: "aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1"). InnerVolumeSpecName "kube-api-access-sbp76". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:11:34 crc kubenswrapper[4753]: I0129 12:11:34.609992 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1" (UID: "aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:11:34 crc kubenswrapper[4753]: I0129 12:11:34.626385 4753 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 12:11:34 crc kubenswrapper[4753]: I0129 12:11:34.626436 4753 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 29 12:11:34 crc kubenswrapper[4753]: I0129 12:11:34.626464 4753 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1-client-ca\") on node \"crc\" DevicePath \"\"" Jan 29 12:11:34 crc kubenswrapper[4753]: I0129 12:11:34.626477 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sbp76\" (UniqueName: \"kubernetes.io/projected/aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1-kube-api-access-sbp76\") on node \"crc\" DevicePath \"\"" Jan 29 12:11:34 crc kubenswrapper[4753]: I0129 12:11:34.626491 4753 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:11:34 crc kubenswrapper[4753]: I0129 12:11:34.632684 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-64cb998744-dj6xp"] Jan 29 12:11:34 crc kubenswrapper[4753]: I0129 12:11:34.652706 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-dgs2s" Jan 29 12:11:35 crc kubenswrapper[4753]: I0129 12:11:35.150882 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-64cb998744-dj6xp" event={"ID":"6554d341-0e97-4e30-9a6d-614e6ad77e40","Type":"ContainerStarted","Data":"dabacd420fb89b4337e44ac519fa85ff0c74bd55cb012807d876488307f8b673"} Jan 29 12:11:35 crc kubenswrapper[4753]: I0129 12:11:35.153088 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-controller-manager/controller-manager-58f96f7974-5vgpz" event={"ID":"aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1","Type":"ContainerDied","Data":"50c1223ba173d163035008d505a4efaced39adac844f2279451b6aa04d819cc2"} Jan 29 12:11:35 crc kubenswrapper[4753]: I0129 12:11:35.153145 4753 scope.go:117] "RemoveContainer" containerID="3205f45e2a562b00298e4a89041971b9d747d21afb428dff89c9c08a016d597f" Jan 29 12:11:35 crc kubenswrapper[4753]: I0129 12:11:35.153145 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-58f96f7974-5vgpz" Jan 29 12:11:35 crc kubenswrapper[4753]: I0129 12:11:35.182254 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-58f96f7974-5vgpz"] Jan 29 12:11:35 crc kubenswrapper[4753]: I0129 12:11:35.185578 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-58f96f7974-5vgpz"] Jan 29 12:11:35 crc kubenswrapper[4753]: I0129 12:11:35.200059 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-dgs2s" Jan 29 12:11:35 crc kubenswrapper[4753]: I0129 12:11:35.413350 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-z6xk4" Jan 29 12:11:35 crc kubenswrapper[4753]: I0129 12:11:35.580362 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-z6xk4" Jan 29 12:11:35 crc kubenswrapper[4753]: I0129 12:11:35.895586 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0f3756b4-7106-4712-8db0-5a70b0f8c76c" path="/var/lib/kubelet/pods/0f3756b4-7106-4712-8db0-5a70b0f8c76c/volumes" Jan 29 12:11:35 crc kubenswrapper[4753]: I0129 12:11:35.897324 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1" path="/var/lib/kubelet/pods/aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1/volumes" Jan 29 12:11:36 crc kubenswrapper[4753]: I0129 12:11:36.160509 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-64cb998744-dj6xp" event={"ID":"6554d341-0e97-4e30-9a6d-614e6ad77e40","Type":"ContainerStarted","Data":"3eb86e82a40034f69289f58ef29b47e66741d67bd2c36b0255f29e7ab21b31d4"} Jan 29 12:11:36 crc kubenswrapper[4753]: I0129 12:11:36.160921 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-64cb998744-dj6xp" Jan 29 12:11:36 crc kubenswrapper[4753]: I0129 12:11:36.403134 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-64cb998744-dj6xp" Jan 29 12:11:36 crc kubenswrapper[4753]: I0129 12:11:36.425725 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-64cb998744-dj6xp" podStartSLOduration=4.425698826 podStartE2EDuration="4.425698826s" podCreationTimestamp="2026-01-29 12:11:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:11:36.182643211 +0000 UTC m=+310.434724696" watchObservedRunningTime="2026-01-29 12:11:36.425698826 +0000 UTC m=+310.677780281" Jan 29 12:11:36 crc kubenswrapper[4753]: I0129 12:11:36.803856 4753 kubelet.go:2421] "SyncLoop ADD" 
source="api" pods=["openshift-controller-manager/controller-manager-589f98ccbb-qr7kl"] Jan 29 12:11:36 crc kubenswrapper[4753]: E0129 12:11:36.804242 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1" containerName="controller-manager" Jan 29 12:11:36 crc kubenswrapper[4753]: I0129 12:11:36.804267 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1" containerName="controller-manager" Jan 29 12:11:36 crc kubenswrapper[4753]: I0129 12:11:36.804456 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="aa5fe7ae-f633-4d08-a3a5-05ff5eb4dfc1" containerName="controller-manager" Jan 29 12:11:36 crc kubenswrapper[4753]: I0129 12:11:36.804933 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-589f98ccbb-qr7kl" Jan 29 12:11:36 crc kubenswrapper[4753]: I0129 12:11:36.807738 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 29 12:11:36 crc kubenswrapper[4753]: I0129 12:11:36.808717 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 29 12:11:36 crc kubenswrapper[4753]: I0129 12:11:36.809151 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 29 12:11:36 crc kubenswrapper[4753]: I0129 12:11:36.809446 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 29 12:11:36 crc kubenswrapper[4753]: I0129 12:11:36.809735 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 29 12:11:36 crc kubenswrapper[4753]: I0129 12:11:36.810006 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 29 12:11:36 crc kubenswrapper[4753]: I0129 12:11:36.828340 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 29 12:11:36 crc kubenswrapper[4753]: I0129 12:11:36.830361 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-589f98ccbb-qr7kl"] Jan 29 12:11:36 crc kubenswrapper[4753]: I0129 12:11:36.861521 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5xpbj\" (UniqueName: \"kubernetes.io/projected/b62c0441-3c47-4a71-8272-8d4b427296ef-kube-api-access-5xpbj\") pod \"controller-manager-589f98ccbb-qr7kl\" (UID: \"b62c0441-3c47-4a71-8272-8d4b427296ef\") " pod="openshift-controller-manager/controller-manager-589f98ccbb-qr7kl" Jan 29 12:11:36 crc kubenswrapper[4753]: I0129 12:11:36.861585 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b62c0441-3c47-4a71-8272-8d4b427296ef-client-ca\") pod \"controller-manager-589f98ccbb-qr7kl\" (UID: \"b62c0441-3c47-4a71-8272-8d4b427296ef\") " pod="openshift-controller-manager/controller-manager-589f98ccbb-qr7kl" Jan 29 12:11:36 crc kubenswrapper[4753]: I0129 12:11:36.861838 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: 
\"kubernetes.io/configmap/b62c0441-3c47-4a71-8272-8d4b427296ef-proxy-ca-bundles\") pod \"controller-manager-589f98ccbb-qr7kl\" (UID: \"b62c0441-3c47-4a71-8272-8d4b427296ef\") " pod="openshift-controller-manager/controller-manager-589f98ccbb-qr7kl" Jan 29 12:11:36 crc kubenswrapper[4753]: I0129 12:11:36.862025 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b62c0441-3c47-4a71-8272-8d4b427296ef-config\") pod \"controller-manager-589f98ccbb-qr7kl\" (UID: \"b62c0441-3c47-4a71-8272-8d4b427296ef\") " pod="openshift-controller-manager/controller-manager-589f98ccbb-qr7kl" Jan 29 12:11:36 crc kubenswrapper[4753]: I0129 12:11:36.862127 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b62c0441-3c47-4a71-8272-8d4b427296ef-serving-cert\") pod \"controller-manager-589f98ccbb-qr7kl\" (UID: \"b62c0441-3c47-4a71-8272-8d4b427296ef\") " pod="openshift-controller-manager/controller-manager-589f98ccbb-qr7kl" Jan 29 12:11:36 crc kubenswrapper[4753]: I0129 12:11:36.963197 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b62c0441-3c47-4a71-8272-8d4b427296ef-config\") pod \"controller-manager-589f98ccbb-qr7kl\" (UID: \"b62c0441-3c47-4a71-8272-8d4b427296ef\") " pod="openshift-controller-manager/controller-manager-589f98ccbb-qr7kl" Jan 29 12:11:36 crc kubenswrapper[4753]: I0129 12:11:36.963319 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b62c0441-3c47-4a71-8272-8d4b427296ef-serving-cert\") pod \"controller-manager-589f98ccbb-qr7kl\" (UID: \"b62c0441-3c47-4a71-8272-8d4b427296ef\") " pod="openshift-controller-manager/controller-manager-589f98ccbb-qr7kl" Jan 29 12:11:36 crc kubenswrapper[4753]: I0129 12:11:36.963419 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5xpbj\" (UniqueName: \"kubernetes.io/projected/b62c0441-3c47-4a71-8272-8d4b427296ef-kube-api-access-5xpbj\") pod \"controller-manager-589f98ccbb-qr7kl\" (UID: \"b62c0441-3c47-4a71-8272-8d4b427296ef\") " pod="openshift-controller-manager/controller-manager-589f98ccbb-qr7kl" Jan 29 12:11:36 crc kubenswrapper[4753]: I0129 12:11:36.964949 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b62c0441-3c47-4a71-8272-8d4b427296ef-client-ca\") pod \"controller-manager-589f98ccbb-qr7kl\" (UID: \"b62c0441-3c47-4a71-8272-8d4b427296ef\") " pod="openshift-controller-manager/controller-manager-589f98ccbb-qr7kl" Jan 29 12:11:36 crc kubenswrapper[4753]: I0129 12:11:36.964997 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b62c0441-3c47-4a71-8272-8d4b427296ef-config\") pod \"controller-manager-589f98ccbb-qr7kl\" (UID: \"b62c0441-3c47-4a71-8272-8d4b427296ef\") " pod="openshift-controller-manager/controller-manager-589f98ccbb-qr7kl" Jan 29 12:11:36 crc kubenswrapper[4753]: I0129 12:11:36.965989 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b62c0441-3c47-4a71-8272-8d4b427296ef-client-ca\") pod \"controller-manager-589f98ccbb-qr7kl\" (UID: \"b62c0441-3c47-4a71-8272-8d4b427296ef\") " 
pod="openshift-controller-manager/controller-manager-589f98ccbb-qr7kl" Jan 29 12:11:36 crc kubenswrapper[4753]: I0129 12:11:36.966218 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/b62c0441-3c47-4a71-8272-8d4b427296ef-proxy-ca-bundles\") pod \"controller-manager-589f98ccbb-qr7kl\" (UID: \"b62c0441-3c47-4a71-8272-8d4b427296ef\") " pod="openshift-controller-manager/controller-manager-589f98ccbb-qr7kl" Jan 29 12:11:36 crc kubenswrapper[4753]: I0129 12:11:36.968954 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/b62c0441-3c47-4a71-8272-8d4b427296ef-proxy-ca-bundles\") pod \"controller-manager-589f98ccbb-qr7kl\" (UID: \"b62c0441-3c47-4a71-8272-8d4b427296ef\") " pod="openshift-controller-manager/controller-manager-589f98ccbb-qr7kl" Jan 29 12:11:36 crc kubenswrapper[4753]: I0129 12:11:36.979514 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b62c0441-3c47-4a71-8272-8d4b427296ef-serving-cert\") pod \"controller-manager-589f98ccbb-qr7kl\" (UID: \"b62c0441-3c47-4a71-8272-8d4b427296ef\") " pod="openshift-controller-manager/controller-manager-589f98ccbb-qr7kl" Jan 29 12:11:36 crc kubenswrapper[4753]: I0129 12:11:36.983242 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5xpbj\" (UniqueName: \"kubernetes.io/projected/b62c0441-3c47-4a71-8272-8d4b427296ef-kube-api-access-5xpbj\") pod \"controller-manager-589f98ccbb-qr7kl\" (UID: \"b62c0441-3c47-4a71-8272-8d4b427296ef\") " pod="openshift-controller-manager/controller-manager-589f98ccbb-qr7kl" Jan 29 12:11:37 crc kubenswrapper[4753]: I0129 12:11:37.126112 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-589f98ccbb-qr7kl" Jan 29 12:11:37 crc kubenswrapper[4753]: I0129 12:11:37.935814 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-589f98ccbb-qr7kl"] Jan 29 12:11:37 crc kubenswrapper[4753]: W0129 12:11:37.945480 4753 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb62c0441_3c47_4a71_8272_8d4b427296ef.slice/crio-7259bf719f630bd9d30f8648c371ee48abcde1e43a91f230b742b8ea599f0dd3 WatchSource:0}: Error finding container 7259bf719f630bd9d30f8648c371ee48abcde1e43a91f230b742b8ea599f0dd3: Status 404 returned error can't find the container with id 7259bf719f630bd9d30f8648c371ee48abcde1e43a91f230b742b8ea599f0dd3 Jan 29 12:11:38 crc kubenswrapper[4753]: I0129 12:11:38.180792 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-589f98ccbb-qr7kl" event={"ID":"b62c0441-3c47-4a71-8272-8d4b427296ef","Type":"ContainerStarted","Data":"7259bf719f630bd9d30f8648c371ee48abcde1e43a91f230b742b8ea599f0dd3"} Jan 29 12:11:38 crc kubenswrapper[4753]: I0129 12:11:38.574644 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-b2rb9" Jan 29 12:11:39 crc kubenswrapper[4753]: I0129 12:11:39.192607 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-589f98ccbb-qr7kl" event={"ID":"b62c0441-3c47-4a71-8272-8d4b427296ef","Type":"ContainerStarted","Data":"88bf8deeeb82c84f8cb5e3e5f7f5c5aeb8c97f204e044f8ee803a67dd0cf0a4b"} Jan 29 12:11:40 crc kubenswrapper[4753]: I0129 12:11:40.437622 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-589f98ccbb-qr7kl" Jan 29 12:11:40 crc kubenswrapper[4753]: I0129 12:11:40.490915 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-589f98ccbb-qr7kl" Jan 29 12:11:40 crc kubenswrapper[4753]: I0129 12:11:40.495646 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-589f98ccbb-qr7kl" podStartSLOduration=8.495627004 podStartE2EDuration="8.495627004s" podCreationTimestamp="2026-01-29 12:11:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:11:40.492564243 +0000 UTC m=+314.744645698" watchObservedRunningTime="2026-01-29 12:11:40.495627004 +0000 UTC m=+314.747708459" Jan 29 12:11:52 crc kubenswrapper[4753]: I0129 12:11:52.357888 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-589f98ccbb-qr7kl"] Jan 29 12:11:52 crc kubenswrapper[4753]: I0129 12:11:52.359391 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-589f98ccbb-qr7kl" podUID="b62c0441-3c47-4a71-8272-8d4b427296ef" containerName="controller-manager" containerID="cri-o://88bf8deeeb82c84f8cb5e3e5f7f5c5aeb8c97f204e044f8ee803a67dd0cf0a4b" gracePeriod=30 Jan 29 12:11:52 crc kubenswrapper[4753]: I0129 12:11:52.454277 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-64cb998744-dj6xp"] Jan 29 12:11:52 crc kubenswrapper[4753]: I0129 12:11:52.454586 4753 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-64cb998744-dj6xp" podUID="6554d341-0e97-4e30-9a6d-614e6ad77e40" containerName="route-controller-manager" containerID="cri-o://3eb86e82a40034f69289f58ef29b47e66741d67bd2c36b0255f29e7ab21b31d4" gracePeriod=30 Jan 29 12:11:53 crc kubenswrapper[4753]: I0129 12:11:53.787033 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-589f98ccbb-qr7kl" Jan 29 12:11:53 crc kubenswrapper[4753]: I0129 12:11:53.843323 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-844bbdf588-qdt2h"] Jan 29 12:11:53 crc kubenswrapper[4753]: E0129 12:11:53.843748 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b62c0441-3c47-4a71-8272-8d4b427296ef" containerName="controller-manager" Jan 29 12:11:53 crc kubenswrapper[4753]: I0129 12:11:53.843773 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="b62c0441-3c47-4a71-8272-8d4b427296ef" containerName="controller-manager" Jan 29 12:11:53 crc kubenswrapper[4753]: I0129 12:11:53.843991 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="b62c0441-3c47-4a71-8272-8d4b427296ef" containerName="controller-manager" Jan 29 12:11:53 crc kubenswrapper[4753]: I0129 12:11:53.844653 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-844bbdf588-qdt2h" Jan 29 12:11:53 crc kubenswrapper[4753]: I0129 12:11:53.847704 4753 generic.go:334] "Generic (PLEG): container finished" podID="6554d341-0e97-4e30-9a6d-614e6ad77e40" containerID="3eb86e82a40034f69289f58ef29b47e66741d67bd2c36b0255f29e7ab21b31d4" exitCode=0 Jan 29 12:11:53 crc kubenswrapper[4753]: I0129 12:11:53.847820 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-64cb998744-dj6xp" event={"ID":"6554d341-0e97-4e30-9a6d-614e6ad77e40","Type":"ContainerDied","Data":"3eb86e82a40034f69289f58ef29b47e66741d67bd2c36b0255f29e7ab21b31d4"} Jan 29 12:11:53 crc kubenswrapper[4753]: I0129 12:11:53.847937 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-844bbdf588-qdt2h"] Jan 29 12:11:53 crc kubenswrapper[4753]: I0129 12:11:53.859022 4753 generic.go:334] "Generic (PLEG): container finished" podID="b62c0441-3c47-4a71-8272-8d4b427296ef" containerID="88bf8deeeb82c84f8cb5e3e5f7f5c5aeb8c97f204e044f8ee803a67dd0cf0a4b" exitCode=0 Jan 29 12:11:53 crc kubenswrapper[4753]: I0129 12:11:53.859074 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-589f98ccbb-qr7kl" event={"ID":"b62c0441-3c47-4a71-8272-8d4b427296ef","Type":"ContainerDied","Data":"88bf8deeeb82c84f8cb5e3e5f7f5c5aeb8c97f204e044f8ee803a67dd0cf0a4b"} Jan 29 12:11:53 crc kubenswrapper[4753]: I0129 12:11:53.859114 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-589f98ccbb-qr7kl" event={"ID":"b62c0441-3c47-4a71-8272-8d4b427296ef","Type":"ContainerDied","Data":"7259bf719f630bd9d30f8648c371ee48abcde1e43a91f230b742b8ea599f0dd3"} Jan 29 12:11:53 crc kubenswrapper[4753]: I0129 12:11:53.859140 4753 scope.go:117] "RemoveContainer" containerID="88bf8deeeb82c84f8cb5e3e5f7f5c5aeb8c97f204e044f8ee803a67dd0cf0a4b" Jan 29 12:11:53 crc kubenswrapper[4753]: I0129 12:11:53.859315 4753 util.go:48] "No 
ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-589f98ccbb-qr7kl" Jan 29 12:11:53 crc kubenswrapper[4753]: I0129 12:11:53.892878 4753 scope.go:117] "RemoveContainer" containerID="88bf8deeeb82c84f8cb5e3e5f7f5c5aeb8c97f204e044f8ee803a67dd0cf0a4b" Jan 29 12:11:53 crc kubenswrapper[4753]: E0129 12:11:53.906532 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"88bf8deeeb82c84f8cb5e3e5f7f5c5aeb8c97f204e044f8ee803a67dd0cf0a4b\": container with ID starting with 88bf8deeeb82c84f8cb5e3e5f7f5c5aeb8c97f204e044f8ee803a67dd0cf0a4b not found: ID does not exist" containerID="88bf8deeeb82c84f8cb5e3e5f7f5c5aeb8c97f204e044f8ee803a67dd0cf0a4b" Jan 29 12:11:53 crc kubenswrapper[4753]: I0129 12:11:53.906914 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"88bf8deeeb82c84f8cb5e3e5f7f5c5aeb8c97f204e044f8ee803a67dd0cf0a4b"} err="failed to get container status \"88bf8deeeb82c84f8cb5e3e5f7f5c5aeb8c97f204e044f8ee803a67dd0cf0a4b\": rpc error: code = NotFound desc = could not find container \"88bf8deeeb82c84f8cb5e3e5f7f5c5aeb8c97f204e044f8ee803a67dd0cf0a4b\": container with ID starting with 88bf8deeeb82c84f8cb5e3e5f7f5c5aeb8c97f204e044f8ee803a67dd0cf0a4b not found: ID does not exist" Jan 29 12:11:53 crc kubenswrapper[4753]: I0129 12:11:53.984979 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b62c0441-3c47-4a71-8272-8d4b427296ef-serving-cert\") pod \"b62c0441-3c47-4a71-8272-8d4b427296ef\" (UID: \"b62c0441-3c47-4a71-8272-8d4b427296ef\") " Jan 29 12:11:53 crc kubenswrapper[4753]: I0129 12:11:53.985052 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5xpbj\" (UniqueName: \"kubernetes.io/projected/b62c0441-3c47-4a71-8272-8d4b427296ef-kube-api-access-5xpbj\") pod \"b62c0441-3c47-4a71-8272-8d4b427296ef\" (UID: \"b62c0441-3c47-4a71-8272-8d4b427296ef\") " Jan 29 12:11:53 crc kubenswrapper[4753]: I0129 12:11:53.985093 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b62c0441-3c47-4a71-8272-8d4b427296ef-client-ca\") pod \"b62c0441-3c47-4a71-8272-8d4b427296ef\" (UID: \"b62c0441-3c47-4a71-8272-8d4b427296ef\") " Jan 29 12:11:53 crc kubenswrapper[4753]: I0129 12:11:53.985180 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b62c0441-3c47-4a71-8272-8d4b427296ef-config\") pod \"b62c0441-3c47-4a71-8272-8d4b427296ef\" (UID: \"b62c0441-3c47-4a71-8272-8d4b427296ef\") " Jan 29 12:11:53 crc kubenswrapper[4753]: I0129 12:11:53.985278 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/b62c0441-3c47-4a71-8272-8d4b427296ef-proxy-ca-bundles\") pod \"b62c0441-3c47-4a71-8272-8d4b427296ef\" (UID: \"b62c0441-3c47-4a71-8272-8d4b427296ef\") " Jan 29 12:11:53 crc kubenswrapper[4753]: I0129 12:11:53.985505 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8b2qn\" (UniqueName: \"kubernetes.io/projected/10e48cb3-f357-49bd-8eee-a576c8814487-kube-api-access-8b2qn\") pod \"controller-manager-844bbdf588-qdt2h\" (UID: \"10e48cb3-f357-49bd-8eee-a576c8814487\") " 
pod="openshift-controller-manager/controller-manager-844bbdf588-qdt2h" Jan 29 12:11:53 crc kubenswrapper[4753]: I0129 12:11:53.985580 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/10e48cb3-f357-49bd-8eee-a576c8814487-proxy-ca-bundles\") pod \"controller-manager-844bbdf588-qdt2h\" (UID: \"10e48cb3-f357-49bd-8eee-a576c8814487\") " pod="openshift-controller-manager/controller-manager-844bbdf588-qdt2h" Jan 29 12:11:53 crc kubenswrapper[4753]: I0129 12:11:53.985740 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/10e48cb3-f357-49bd-8eee-a576c8814487-client-ca\") pod \"controller-manager-844bbdf588-qdt2h\" (UID: \"10e48cb3-f357-49bd-8eee-a576c8814487\") " pod="openshift-controller-manager/controller-manager-844bbdf588-qdt2h" Jan 29 12:11:53 crc kubenswrapper[4753]: I0129 12:11:53.985853 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/10e48cb3-f357-49bd-8eee-a576c8814487-config\") pod \"controller-manager-844bbdf588-qdt2h\" (UID: \"10e48cb3-f357-49bd-8eee-a576c8814487\") " pod="openshift-controller-manager/controller-manager-844bbdf588-qdt2h" Jan 29 12:11:53 crc kubenswrapper[4753]: I0129 12:11:53.985903 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/10e48cb3-f357-49bd-8eee-a576c8814487-serving-cert\") pod \"controller-manager-844bbdf588-qdt2h\" (UID: \"10e48cb3-f357-49bd-8eee-a576c8814487\") " pod="openshift-controller-manager/controller-manager-844bbdf588-qdt2h" Jan 29 12:11:53 crc kubenswrapper[4753]: I0129 12:11:53.985947 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b62c0441-3c47-4a71-8272-8d4b427296ef-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "b62c0441-3c47-4a71-8272-8d4b427296ef" (UID: "b62c0441-3c47-4a71-8272-8d4b427296ef"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:11:53 crc kubenswrapper[4753]: I0129 12:11:53.986005 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b62c0441-3c47-4a71-8272-8d4b427296ef-client-ca" (OuterVolumeSpecName: "client-ca") pod "b62c0441-3c47-4a71-8272-8d4b427296ef" (UID: "b62c0441-3c47-4a71-8272-8d4b427296ef"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:11:53 crc kubenswrapper[4753]: I0129 12:11:53.986040 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b62c0441-3c47-4a71-8272-8d4b427296ef-config" (OuterVolumeSpecName: "config") pod "b62c0441-3c47-4a71-8272-8d4b427296ef" (UID: "b62c0441-3c47-4a71-8272-8d4b427296ef"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:11:53 crc kubenswrapper[4753]: I0129 12:11:53.986079 4753 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/b62c0441-3c47-4a71-8272-8d4b427296ef-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 29 12:11:53 crc kubenswrapper[4753]: I0129 12:11:53.998370 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b62c0441-3c47-4a71-8272-8d4b427296ef-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "b62c0441-3c47-4a71-8272-8d4b427296ef" (UID: "b62c0441-3c47-4a71-8272-8d4b427296ef"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:11:53 crc kubenswrapper[4753]: I0129 12:11:53.998449 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b62c0441-3c47-4a71-8272-8d4b427296ef-kube-api-access-5xpbj" (OuterVolumeSpecName: "kube-api-access-5xpbj") pod "b62c0441-3c47-4a71-8272-8d4b427296ef" (UID: "b62c0441-3c47-4a71-8272-8d4b427296ef"). InnerVolumeSpecName "kube-api-access-5xpbj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:11:54 crc kubenswrapper[4753]: I0129 12:11:54.088332 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/10e48cb3-f357-49bd-8eee-a576c8814487-client-ca\") pod \"controller-manager-844bbdf588-qdt2h\" (UID: \"10e48cb3-f357-49bd-8eee-a576c8814487\") " pod="openshift-controller-manager/controller-manager-844bbdf588-qdt2h" Jan 29 12:11:54 crc kubenswrapper[4753]: I0129 12:11:54.088457 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/10e48cb3-f357-49bd-8eee-a576c8814487-config\") pod \"controller-manager-844bbdf588-qdt2h\" (UID: \"10e48cb3-f357-49bd-8eee-a576c8814487\") " pod="openshift-controller-manager/controller-manager-844bbdf588-qdt2h" Jan 29 12:11:54 crc kubenswrapper[4753]: I0129 12:11:54.089344 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/10e48cb3-f357-49bd-8eee-a576c8814487-client-ca\") pod \"controller-manager-844bbdf588-qdt2h\" (UID: \"10e48cb3-f357-49bd-8eee-a576c8814487\") " pod="openshift-controller-manager/controller-manager-844bbdf588-qdt2h" Jan 29 12:11:54 crc kubenswrapper[4753]: I0129 12:11:54.089402 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/10e48cb3-f357-49bd-8eee-a576c8814487-serving-cert\") pod \"controller-manager-844bbdf588-qdt2h\" (UID: \"10e48cb3-f357-49bd-8eee-a576c8814487\") " pod="openshift-controller-manager/controller-manager-844bbdf588-qdt2h" Jan 29 12:11:54 crc kubenswrapper[4753]: I0129 12:11:54.089517 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8b2qn\" (UniqueName: \"kubernetes.io/projected/10e48cb3-f357-49bd-8eee-a576c8814487-kube-api-access-8b2qn\") pod \"controller-manager-844bbdf588-qdt2h\" (UID: \"10e48cb3-f357-49bd-8eee-a576c8814487\") " pod="openshift-controller-manager/controller-manager-844bbdf588-qdt2h" Jan 29 12:11:54 crc kubenswrapper[4753]: I0129 12:11:54.089552 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/10e48cb3-f357-49bd-8eee-a576c8814487-proxy-ca-bundles\") 
pod \"controller-manager-844bbdf588-qdt2h\" (UID: \"10e48cb3-f357-49bd-8eee-a576c8814487\") " pod="openshift-controller-manager/controller-manager-844bbdf588-qdt2h" Jan 29 12:11:54 crc kubenswrapper[4753]: I0129 12:11:54.089630 4753 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b62c0441-3c47-4a71-8272-8d4b427296ef-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:11:54 crc kubenswrapper[4753]: I0129 12:11:54.089645 4753 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b62c0441-3c47-4a71-8272-8d4b427296ef-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 12:11:54 crc kubenswrapper[4753]: I0129 12:11:54.089661 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5xpbj\" (UniqueName: \"kubernetes.io/projected/b62c0441-3c47-4a71-8272-8d4b427296ef-kube-api-access-5xpbj\") on node \"crc\" DevicePath \"\"" Jan 29 12:11:54 crc kubenswrapper[4753]: I0129 12:11:54.089670 4753 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b62c0441-3c47-4a71-8272-8d4b427296ef-client-ca\") on node \"crc\" DevicePath \"\"" Jan 29 12:11:54 crc kubenswrapper[4753]: I0129 12:11:54.090028 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/10e48cb3-f357-49bd-8eee-a576c8814487-config\") pod \"controller-manager-844bbdf588-qdt2h\" (UID: \"10e48cb3-f357-49bd-8eee-a576c8814487\") " pod="openshift-controller-manager/controller-manager-844bbdf588-qdt2h" Jan 29 12:11:54 crc kubenswrapper[4753]: I0129 12:11:54.090575 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/10e48cb3-f357-49bd-8eee-a576c8814487-proxy-ca-bundles\") pod \"controller-manager-844bbdf588-qdt2h\" (UID: \"10e48cb3-f357-49bd-8eee-a576c8814487\") " pod="openshift-controller-manager/controller-manager-844bbdf588-qdt2h" Jan 29 12:11:54 crc kubenswrapper[4753]: I0129 12:11:54.093707 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/10e48cb3-f357-49bd-8eee-a576c8814487-serving-cert\") pod \"controller-manager-844bbdf588-qdt2h\" (UID: \"10e48cb3-f357-49bd-8eee-a576c8814487\") " pod="openshift-controller-manager/controller-manager-844bbdf588-qdt2h" Jan 29 12:11:54 crc kubenswrapper[4753]: I0129 12:11:54.114164 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8b2qn\" (UniqueName: \"kubernetes.io/projected/10e48cb3-f357-49bd-8eee-a576c8814487-kube-api-access-8b2qn\") pod \"controller-manager-844bbdf588-qdt2h\" (UID: \"10e48cb3-f357-49bd-8eee-a576c8814487\") " pod="openshift-controller-manager/controller-manager-844bbdf588-qdt2h" Jan 29 12:11:54 crc kubenswrapper[4753]: I0129 12:11:54.199603 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-589f98ccbb-qr7kl"] Jan 29 12:11:54 crc kubenswrapper[4753]: I0129 12:11:54.203354 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-589f98ccbb-qr7kl"] Jan 29 12:11:54 crc kubenswrapper[4753]: I0129 12:11:54.209501 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-844bbdf588-qdt2h" Jan 29 12:11:54 crc kubenswrapper[4753]: I0129 12:11:54.223359 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-64cb998744-dj6xp" Jan 29 12:11:54 crc kubenswrapper[4753]: I0129 12:11:54.394356 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6554d341-0e97-4e30-9a6d-614e6ad77e40-client-ca\") pod \"6554d341-0e97-4e30-9a6d-614e6ad77e40\" (UID: \"6554d341-0e97-4e30-9a6d-614e6ad77e40\") " Jan 29 12:11:54 crc kubenswrapper[4753]: I0129 12:11:54.394848 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6554d341-0e97-4e30-9a6d-614e6ad77e40-serving-cert\") pod \"6554d341-0e97-4e30-9a6d-614e6ad77e40\" (UID: \"6554d341-0e97-4e30-9a6d-614e6ad77e40\") " Jan 29 12:11:54 crc kubenswrapper[4753]: I0129 12:11:54.394884 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6554d341-0e97-4e30-9a6d-614e6ad77e40-config\") pod \"6554d341-0e97-4e30-9a6d-614e6ad77e40\" (UID: \"6554d341-0e97-4e30-9a6d-614e6ad77e40\") " Jan 29 12:11:54 crc kubenswrapper[4753]: I0129 12:11:54.395026 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jvkk7\" (UniqueName: \"kubernetes.io/projected/6554d341-0e97-4e30-9a6d-614e6ad77e40-kube-api-access-jvkk7\") pod \"6554d341-0e97-4e30-9a6d-614e6ad77e40\" (UID: \"6554d341-0e97-4e30-9a6d-614e6ad77e40\") " Jan 29 12:11:54 crc kubenswrapper[4753]: I0129 12:11:54.395802 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6554d341-0e97-4e30-9a6d-614e6ad77e40-client-ca" (OuterVolumeSpecName: "client-ca") pod "6554d341-0e97-4e30-9a6d-614e6ad77e40" (UID: "6554d341-0e97-4e30-9a6d-614e6ad77e40"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:11:54 crc kubenswrapper[4753]: I0129 12:11:54.395939 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6554d341-0e97-4e30-9a6d-614e6ad77e40-config" (OuterVolumeSpecName: "config") pod "6554d341-0e97-4e30-9a6d-614e6ad77e40" (UID: "6554d341-0e97-4e30-9a6d-614e6ad77e40"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:11:54 crc kubenswrapper[4753]: I0129 12:11:54.405946 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6554d341-0e97-4e30-9a6d-614e6ad77e40-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6554d341-0e97-4e30-9a6d-614e6ad77e40" (UID: "6554d341-0e97-4e30-9a6d-614e6ad77e40"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:11:54 crc kubenswrapper[4753]: I0129 12:11:54.406773 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6554d341-0e97-4e30-9a6d-614e6ad77e40-kube-api-access-jvkk7" (OuterVolumeSpecName: "kube-api-access-jvkk7") pod "6554d341-0e97-4e30-9a6d-614e6ad77e40" (UID: "6554d341-0e97-4e30-9a6d-614e6ad77e40"). InnerVolumeSpecName "kube-api-access-jvkk7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:11:54 crc kubenswrapper[4753]: I0129 12:11:54.497218 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jvkk7\" (UniqueName: \"kubernetes.io/projected/6554d341-0e97-4e30-9a6d-614e6ad77e40-kube-api-access-jvkk7\") on node \"crc\" DevicePath \"\"" Jan 29 12:11:54 crc kubenswrapper[4753]: I0129 12:11:54.497306 4753 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6554d341-0e97-4e30-9a6d-614e6ad77e40-client-ca\") on node \"crc\" DevicePath \"\"" Jan 29 12:11:54 crc kubenswrapper[4753]: I0129 12:11:54.497325 4753 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6554d341-0e97-4e30-9a6d-614e6ad77e40-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 12:11:54 crc kubenswrapper[4753]: I0129 12:11:54.497336 4753 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6554d341-0e97-4e30-9a6d-614e6ad77e40-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:11:54 crc kubenswrapper[4753]: I0129 12:11:54.657172 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-844bbdf588-qdt2h"] Jan 29 12:11:54 crc kubenswrapper[4753]: W0129 12:11:54.661897 4753 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod10e48cb3_f357_49bd_8eee_a576c8814487.slice/crio-a001c08e5aa2986118d6d4711503ebd0ab8573a6c88c3cebf525d1e8a6090dfb WatchSource:0}: Error finding container a001c08e5aa2986118d6d4711503ebd0ab8573a6c88c3cebf525d1e8a6090dfb: Status 404 returned error can't find the container with id a001c08e5aa2986118d6d4711503ebd0ab8573a6c88c3cebf525d1e8a6090dfb Jan 29 12:11:54 crc kubenswrapper[4753]: I0129 12:11:54.867933 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-64cb998744-dj6xp" event={"ID":"6554d341-0e97-4e30-9a6d-614e6ad77e40","Type":"ContainerDied","Data":"dabacd420fb89b4337e44ac519fa85ff0c74bd55cb012807d876488307f8b673"} Jan 29 12:11:54 crc kubenswrapper[4753]: I0129 12:11:54.867975 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-64cb998744-dj6xp" Jan 29 12:11:54 crc kubenswrapper[4753]: I0129 12:11:54.868009 4753 scope.go:117] "RemoveContainer" containerID="3eb86e82a40034f69289f58ef29b47e66741d67bd2c36b0255f29e7ab21b31d4" Jan 29 12:11:54 crc kubenswrapper[4753]: I0129 12:11:54.870248 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-844bbdf588-qdt2h" event={"ID":"10e48cb3-f357-49bd-8eee-a576c8814487","Type":"ContainerStarted","Data":"a001c08e5aa2986118d6d4711503ebd0ab8573a6c88c3cebf525d1e8a6090dfb"} Jan 29 12:11:54 crc kubenswrapper[4753]: I0129 12:11:54.898780 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-64cb998744-dj6xp"] Jan 29 12:11:54 crc kubenswrapper[4753]: I0129 12:11:54.903685 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-64cb998744-dj6xp"] Jan 29 12:11:55 crc kubenswrapper[4753]: I0129 12:11:55.916038 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6554d341-0e97-4e30-9a6d-614e6ad77e40" path="/var/lib/kubelet/pods/6554d341-0e97-4e30-9a6d-614e6ad77e40/volumes" Jan 29 12:11:55 crc kubenswrapper[4753]: I0129 12:11:55.916634 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b62c0441-3c47-4a71-8272-8d4b427296ef" path="/var/lib/kubelet/pods/b62c0441-3c47-4a71-8272-8d4b427296ef/volumes" Jan 29 12:11:55 crc kubenswrapper[4753]: I0129 12:11:55.927471 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-844bbdf588-qdt2h" event={"ID":"10e48cb3-f357-49bd-8eee-a576c8814487","Type":"ContainerStarted","Data":"252f4e2e3424b357db5c66af56852ed8dd6c17586bbf3fb35216f8cf1954cab3"} Jan 29 12:11:56 crc kubenswrapper[4753]: I0129 12:11:56.817325 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-c8bcc84b6-vdbzf"] Jan 29 12:11:56 crc kubenswrapper[4753]: E0129 12:11:56.818495 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6554d341-0e97-4e30-9a6d-614e6ad77e40" containerName="route-controller-manager" Jan 29 12:11:56 crc kubenswrapper[4753]: I0129 12:11:56.818609 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="6554d341-0e97-4e30-9a6d-614e6ad77e40" containerName="route-controller-manager" Jan 29 12:11:56 crc kubenswrapper[4753]: I0129 12:11:56.818974 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="6554d341-0e97-4e30-9a6d-614e6ad77e40" containerName="route-controller-manager" Jan 29 12:11:56 crc kubenswrapper[4753]: I0129 12:11:56.819617 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-c8bcc84b6-vdbzf" Jan 29 12:11:56 crc kubenswrapper[4753]: I0129 12:11:56.821571 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 29 12:11:56 crc kubenswrapper[4753]: I0129 12:11:56.822249 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 29 12:11:56 crc kubenswrapper[4753]: I0129 12:11:56.822259 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 29 12:11:56 crc kubenswrapper[4753]: I0129 12:11:56.822477 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 29 12:11:56 crc kubenswrapper[4753]: I0129 12:11:56.822668 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 29 12:11:56 crc kubenswrapper[4753]: I0129 12:11:56.826839 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 29 12:11:56 crc kubenswrapper[4753]: I0129 12:11:56.835934 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-c8bcc84b6-vdbzf"] Jan 29 12:11:56 crc kubenswrapper[4753]: I0129 12:11:56.933841 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-844bbdf588-qdt2h" Jan 29 12:11:56 crc kubenswrapper[4753]: I0129 12:11:56.945979 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-844bbdf588-qdt2h" Jan 29 12:11:56 crc kubenswrapper[4753]: I0129 12:11:56.957395 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-844bbdf588-qdt2h" podStartSLOduration=4.957375202 podStartE2EDuration="4.957375202s" podCreationTimestamp="2026-01-29 12:11:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:11:56.955082813 +0000 UTC m=+331.207164268" watchObservedRunningTime="2026-01-29 12:11:56.957375202 +0000 UTC m=+331.209456647" Jan 29 12:11:57 crc kubenswrapper[4753]: I0129 12:11:57.008544 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6958c11f-6d19-4cd0-be14-e796f1df37ee-serving-cert\") pod \"route-controller-manager-c8bcc84b6-vdbzf\" (UID: \"6958c11f-6d19-4cd0-be14-e796f1df37ee\") " pod="openshift-route-controller-manager/route-controller-manager-c8bcc84b6-vdbzf" Jan 29 12:11:57 crc kubenswrapper[4753]: I0129 12:11:57.008928 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6958c11f-6d19-4cd0-be14-e796f1df37ee-config\") pod \"route-controller-manager-c8bcc84b6-vdbzf\" (UID: \"6958c11f-6d19-4cd0-be14-e796f1df37ee\") " pod="openshift-route-controller-manager/route-controller-manager-c8bcc84b6-vdbzf" Jan 29 12:11:57 crc kubenswrapper[4753]: I0129 12:11:57.009080 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: 
\"kubernetes.io/configmap/6958c11f-6d19-4cd0-be14-e796f1df37ee-client-ca\") pod \"route-controller-manager-c8bcc84b6-vdbzf\" (UID: \"6958c11f-6d19-4cd0-be14-e796f1df37ee\") " pod="openshift-route-controller-manager/route-controller-manager-c8bcc84b6-vdbzf" Jan 29 12:11:57 crc kubenswrapper[4753]: I0129 12:11:57.009242 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jn78n\" (UniqueName: \"kubernetes.io/projected/6958c11f-6d19-4cd0-be14-e796f1df37ee-kube-api-access-jn78n\") pod \"route-controller-manager-c8bcc84b6-vdbzf\" (UID: \"6958c11f-6d19-4cd0-be14-e796f1df37ee\") " pod="openshift-route-controller-manager/route-controller-manager-c8bcc84b6-vdbzf" Jan 29 12:11:57 crc kubenswrapper[4753]: I0129 12:11:57.110450 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jn78n\" (UniqueName: \"kubernetes.io/projected/6958c11f-6d19-4cd0-be14-e796f1df37ee-kube-api-access-jn78n\") pod \"route-controller-manager-c8bcc84b6-vdbzf\" (UID: \"6958c11f-6d19-4cd0-be14-e796f1df37ee\") " pod="openshift-route-controller-manager/route-controller-manager-c8bcc84b6-vdbzf" Jan 29 12:11:57 crc kubenswrapper[4753]: I0129 12:11:57.110651 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6958c11f-6d19-4cd0-be14-e796f1df37ee-serving-cert\") pod \"route-controller-manager-c8bcc84b6-vdbzf\" (UID: \"6958c11f-6d19-4cd0-be14-e796f1df37ee\") " pod="openshift-route-controller-manager/route-controller-manager-c8bcc84b6-vdbzf" Jan 29 12:11:57 crc kubenswrapper[4753]: I0129 12:11:57.110678 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6958c11f-6d19-4cd0-be14-e796f1df37ee-config\") pod \"route-controller-manager-c8bcc84b6-vdbzf\" (UID: \"6958c11f-6d19-4cd0-be14-e796f1df37ee\") " pod="openshift-route-controller-manager/route-controller-manager-c8bcc84b6-vdbzf" Jan 29 12:11:57 crc kubenswrapper[4753]: I0129 12:11:57.110710 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6958c11f-6d19-4cd0-be14-e796f1df37ee-client-ca\") pod \"route-controller-manager-c8bcc84b6-vdbzf\" (UID: \"6958c11f-6d19-4cd0-be14-e796f1df37ee\") " pod="openshift-route-controller-manager/route-controller-manager-c8bcc84b6-vdbzf" Jan 29 12:11:57 crc kubenswrapper[4753]: I0129 12:11:57.112254 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6958c11f-6d19-4cd0-be14-e796f1df37ee-config\") pod \"route-controller-manager-c8bcc84b6-vdbzf\" (UID: \"6958c11f-6d19-4cd0-be14-e796f1df37ee\") " pod="openshift-route-controller-manager/route-controller-manager-c8bcc84b6-vdbzf" Jan 29 12:11:57 crc kubenswrapper[4753]: I0129 12:11:57.112804 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6958c11f-6d19-4cd0-be14-e796f1df37ee-client-ca\") pod \"route-controller-manager-c8bcc84b6-vdbzf\" (UID: \"6958c11f-6d19-4cd0-be14-e796f1df37ee\") " pod="openshift-route-controller-manager/route-controller-manager-c8bcc84b6-vdbzf" Jan 29 12:11:57 crc kubenswrapper[4753]: I0129 12:11:57.126759 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6958c11f-6d19-4cd0-be14-e796f1df37ee-serving-cert\") pod 
\"route-controller-manager-c8bcc84b6-vdbzf\" (UID: \"6958c11f-6d19-4cd0-be14-e796f1df37ee\") " pod="openshift-route-controller-manager/route-controller-manager-c8bcc84b6-vdbzf" Jan 29 12:11:57 crc kubenswrapper[4753]: I0129 12:11:57.130081 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jn78n\" (UniqueName: \"kubernetes.io/projected/6958c11f-6d19-4cd0-be14-e796f1df37ee-kube-api-access-jn78n\") pod \"route-controller-manager-c8bcc84b6-vdbzf\" (UID: \"6958c11f-6d19-4cd0-be14-e796f1df37ee\") " pod="openshift-route-controller-manager/route-controller-manager-c8bcc84b6-vdbzf" Jan 29 12:11:57 crc kubenswrapper[4753]: I0129 12:11:57.138510 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-c8bcc84b6-vdbzf" Jan 29 12:11:57 crc kubenswrapper[4753]: I0129 12:11:57.548203 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-c8bcc84b6-vdbzf"] Jan 29 12:11:58 crc kubenswrapper[4753]: I0129 12:11:58.013568 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-c8bcc84b6-vdbzf" event={"ID":"6958c11f-6d19-4cd0-be14-e796f1df37ee","Type":"ContainerStarted","Data":"6f13ca5bcef004607a74cd671c47b73cabc51cc8e1bdfc26e90877033d2eb4fe"} Jan 29 12:11:59 crc kubenswrapper[4753]: I0129 12:11:59.021526 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-c8bcc84b6-vdbzf" event={"ID":"6958c11f-6d19-4cd0-be14-e796f1df37ee","Type":"ContainerStarted","Data":"4c75a76ce4e80a4a8fbd77a8e8cd5947963eca8a13120cf1b19a46306c9d5376"} Jan 29 12:12:00 crc kubenswrapper[4753]: I0129 12:12:00.028232 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-c8bcc84b6-vdbzf" Jan 29 12:12:00 crc kubenswrapper[4753]: I0129 12:12:00.033335 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-c8bcc84b6-vdbzf" Jan 29 12:12:00 crc kubenswrapper[4753]: I0129 12:12:00.049473 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-c8bcc84b6-vdbzf" podStartSLOduration=8.049446129 podStartE2EDuration="8.049446129s" podCreationTimestamp="2026-01-29 12:11:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:12:00.04549789 +0000 UTC m=+334.297579345" watchObservedRunningTime="2026-01-29 12:12:00.049446129 +0000 UTC m=+334.301527584" Jan 29 12:12:06 crc kubenswrapper[4753]: I0129 12:12:06.272812 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-7dkr6"] Jan 29 12:12:06 crc kubenswrapper[4753]: I0129 12:12:06.273633 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-7dkr6" podUID="318aa5db-6b19-4efe-8c5d-00fbb4a84b13" containerName="registry-server" containerID="cri-o://7e5b472b91c06d451cc7b21999fbe372bbc0b9e7274b74bb9d6f37e446cbab33" gracePeriod=2 Jan 29 12:12:06 crc kubenswrapper[4753]: I0129 12:12:06.830808 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7dkr6" Jan 29 12:12:06 crc kubenswrapper[4753]: I0129 12:12:06.978877 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/318aa5db-6b19-4efe-8c5d-00fbb4a84b13-utilities\") pod \"318aa5db-6b19-4efe-8c5d-00fbb4a84b13\" (UID: \"318aa5db-6b19-4efe-8c5d-00fbb4a84b13\") " Jan 29 12:12:06 crc kubenswrapper[4753]: I0129 12:12:06.979048 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jqg9m\" (UniqueName: \"kubernetes.io/projected/318aa5db-6b19-4efe-8c5d-00fbb4a84b13-kube-api-access-jqg9m\") pod \"318aa5db-6b19-4efe-8c5d-00fbb4a84b13\" (UID: \"318aa5db-6b19-4efe-8c5d-00fbb4a84b13\") " Jan 29 12:12:06 crc kubenswrapper[4753]: I0129 12:12:06.979333 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/318aa5db-6b19-4efe-8c5d-00fbb4a84b13-catalog-content\") pod \"318aa5db-6b19-4efe-8c5d-00fbb4a84b13\" (UID: \"318aa5db-6b19-4efe-8c5d-00fbb4a84b13\") " Jan 29 12:12:06 crc kubenswrapper[4753]: I0129 12:12:06.980122 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/318aa5db-6b19-4efe-8c5d-00fbb4a84b13-utilities" (OuterVolumeSpecName: "utilities") pod "318aa5db-6b19-4efe-8c5d-00fbb4a84b13" (UID: "318aa5db-6b19-4efe-8c5d-00fbb4a84b13"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:12:06 crc kubenswrapper[4753]: I0129 12:12:06.992794 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/318aa5db-6b19-4efe-8c5d-00fbb4a84b13-kube-api-access-jqg9m" (OuterVolumeSpecName: "kube-api-access-jqg9m") pod "318aa5db-6b19-4efe-8c5d-00fbb4a84b13" (UID: "318aa5db-6b19-4efe-8c5d-00fbb4a84b13"). InnerVolumeSpecName "kube-api-access-jqg9m". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:12:07 crc kubenswrapper[4753]: I0129 12:12:07.002661 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/318aa5db-6b19-4efe-8c5d-00fbb4a84b13-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "318aa5db-6b19-4efe-8c5d-00fbb4a84b13" (UID: "318aa5db-6b19-4efe-8c5d-00fbb4a84b13"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:12:07 crc kubenswrapper[4753]: I0129 12:12:07.081126 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jqg9m\" (UniqueName: \"kubernetes.io/projected/318aa5db-6b19-4efe-8c5d-00fbb4a84b13-kube-api-access-jqg9m\") on node \"crc\" DevicePath \"\"" Jan 29 12:12:07 crc kubenswrapper[4753]: I0129 12:12:07.081194 4753 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/318aa5db-6b19-4efe-8c5d-00fbb4a84b13-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 12:12:07 crc kubenswrapper[4753]: I0129 12:12:07.081206 4753 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/318aa5db-6b19-4efe-8c5d-00fbb4a84b13-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 12:12:07 crc kubenswrapper[4753]: I0129 12:12:07.119587 4753 generic.go:334] "Generic (PLEG): container finished" podID="318aa5db-6b19-4efe-8c5d-00fbb4a84b13" containerID="7e5b472b91c06d451cc7b21999fbe372bbc0b9e7274b74bb9d6f37e446cbab33" exitCode=0 Jan 29 12:12:07 crc kubenswrapper[4753]: I0129 12:12:07.119679 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7dkr6" event={"ID":"318aa5db-6b19-4efe-8c5d-00fbb4a84b13","Type":"ContainerDied","Data":"7e5b472b91c06d451cc7b21999fbe372bbc0b9e7274b74bb9d6f37e446cbab33"} Jan 29 12:12:07 crc kubenswrapper[4753]: I0129 12:12:07.119735 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7dkr6" event={"ID":"318aa5db-6b19-4efe-8c5d-00fbb4a84b13","Type":"ContainerDied","Data":"f43d1fdc19a8e98625a8968f7cb0a82276aafd99c8212a9ee92cd8b070de1f47"} Jan 29 12:12:07 crc kubenswrapper[4753]: I0129 12:12:07.119762 4753 scope.go:117] "RemoveContainer" containerID="7e5b472b91c06d451cc7b21999fbe372bbc0b9e7274b74bb9d6f37e446cbab33" Jan 29 12:12:07 crc kubenswrapper[4753]: I0129 12:12:07.120640 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7dkr6" Jan 29 12:12:07 crc kubenswrapper[4753]: I0129 12:12:07.156920 4753 scope.go:117] "RemoveContainer" containerID="451894115edc3e13f3ac8ed812be9d2d2711bdfb9dbacf7d456c803b3d386f87" Jan 29 12:12:07 crc kubenswrapper[4753]: I0129 12:12:07.167866 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-7dkr6"] Jan 29 12:12:07 crc kubenswrapper[4753]: I0129 12:12:07.172992 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-7dkr6"] Jan 29 12:12:07 crc kubenswrapper[4753]: I0129 12:12:07.182648 4753 scope.go:117] "RemoveContainer" containerID="cc09a810c4a879ba64353b140c24967cc6702f73a203166a1a4d66e7208f8fb3" Jan 29 12:12:07 crc kubenswrapper[4753]: I0129 12:12:07.205741 4753 scope.go:117] "RemoveContainer" containerID="7e5b472b91c06d451cc7b21999fbe372bbc0b9e7274b74bb9d6f37e446cbab33" Jan 29 12:12:07 crc kubenswrapper[4753]: E0129 12:12:07.206584 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7e5b472b91c06d451cc7b21999fbe372bbc0b9e7274b74bb9d6f37e446cbab33\": container with ID starting with 7e5b472b91c06d451cc7b21999fbe372bbc0b9e7274b74bb9d6f37e446cbab33 not found: ID does not exist" containerID="7e5b472b91c06d451cc7b21999fbe372bbc0b9e7274b74bb9d6f37e446cbab33" Jan 29 12:12:07 crc kubenswrapper[4753]: I0129 12:12:07.206639 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7e5b472b91c06d451cc7b21999fbe372bbc0b9e7274b74bb9d6f37e446cbab33"} err="failed to get container status \"7e5b472b91c06d451cc7b21999fbe372bbc0b9e7274b74bb9d6f37e446cbab33\": rpc error: code = NotFound desc = could not find container \"7e5b472b91c06d451cc7b21999fbe372bbc0b9e7274b74bb9d6f37e446cbab33\": container with ID starting with 7e5b472b91c06d451cc7b21999fbe372bbc0b9e7274b74bb9d6f37e446cbab33 not found: ID does not exist" Jan 29 12:12:07 crc kubenswrapper[4753]: I0129 12:12:07.206680 4753 scope.go:117] "RemoveContainer" containerID="451894115edc3e13f3ac8ed812be9d2d2711bdfb9dbacf7d456c803b3d386f87" Jan 29 12:12:07 crc kubenswrapper[4753]: E0129 12:12:07.207290 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"451894115edc3e13f3ac8ed812be9d2d2711bdfb9dbacf7d456c803b3d386f87\": container with ID starting with 451894115edc3e13f3ac8ed812be9d2d2711bdfb9dbacf7d456c803b3d386f87 not found: ID does not exist" containerID="451894115edc3e13f3ac8ed812be9d2d2711bdfb9dbacf7d456c803b3d386f87" Jan 29 12:12:07 crc kubenswrapper[4753]: I0129 12:12:07.207320 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"451894115edc3e13f3ac8ed812be9d2d2711bdfb9dbacf7d456c803b3d386f87"} err="failed to get container status \"451894115edc3e13f3ac8ed812be9d2d2711bdfb9dbacf7d456c803b3d386f87\": rpc error: code = NotFound desc = could not find container \"451894115edc3e13f3ac8ed812be9d2d2711bdfb9dbacf7d456c803b3d386f87\": container with ID starting with 451894115edc3e13f3ac8ed812be9d2d2711bdfb9dbacf7d456c803b3d386f87 not found: ID does not exist" Jan 29 12:12:07 crc kubenswrapper[4753]: I0129 12:12:07.207338 4753 scope.go:117] "RemoveContainer" containerID="cc09a810c4a879ba64353b140c24967cc6702f73a203166a1a4d66e7208f8fb3" Jan 29 12:12:07 crc kubenswrapper[4753]: E0129 12:12:07.207835 4753 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"cc09a810c4a879ba64353b140c24967cc6702f73a203166a1a4d66e7208f8fb3\": container with ID starting with cc09a810c4a879ba64353b140c24967cc6702f73a203166a1a4d66e7208f8fb3 not found: ID does not exist" containerID="cc09a810c4a879ba64353b140c24967cc6702f73a203166a1a4d66e7208f8fb3" Jan 29 12:12:07 crc kubenswrapper[4753]: I0129 12:12:07.207873 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cc09a810c4a879ba64353b140c24967cc6702f73a203166a1a4d66e7208f8fb3"} err="failed to get container status \"cc09a810c4a879ba64353b140c24967cc6702f73a203166a1a4d66e7208f8fb3\": rpc error: code = NotFound desc = could not find container \"cc09a810c4a879ba64353b140c24967cc6702f73a203166a1a4d66e7208f8fb3\": container with ID starting with cc09a810c4a879ba64353b140c24967cc6702f73a203166a1a4d66e7208f8fb3 not found: ID does not exist" Jan 29 12:12:07 crc kubenswrapper[4753]: I0129 12:12:07.673416 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-5clxb"] Jan 29 12:12:07 crc kubenswrapper[4753]: I0129 12:12:07.673732 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-5clxb" podUID="574faad5-0a82-4ff3-b0a8-5390bfd3dc27" containerName="registry-server" containerID="cri-o://ac2f0ade620830ba2b7f7881563fefad779b1fb0b8490876d6338c021f3e14f5" gracePeriod=2 Jan 29 12:12:08 crc kubenswrapper[4753]: I0129 12:12:08.046376 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="318aa5db-6b19-4efe-8c5d-00fbb4a84b13" path="/var/lib/kubelet/pods/318aa5db-6b19-4efe-8c5d-00fbb4a84b13/volumes" Jan 29 12:12:08 crc kubenswrapper[4753]: I0129 12:12:08.877021 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bsxpd"] Jan 29 12:12:08 crc kubenswrapper[4753]: I0129 12:12:08.877678 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-bsxpd" podUID="ae52688b-6f7a-441f-927b-ab547b7ce44f" containerName="registry-server" containerID="cri-o://52f05030c12ab9e8660e14b119e79d18cd261ba1485966914b56eb53990558c5" gracePeriod=2 Jan 29 12:12:08 crc kubenswrapper[4753]: I0129 12:12:08.952153 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-5clxb" Jan 29 12:12:09 crc kubenswrapper[4753]: I0129 12:12:09.050034 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-62gc5\" (UniqueName: \"kubernetes.io/projected/574faad5-0a82-4ff3-b0a8-5390bfd3dc27-kube-api-access-62gc5\") pod \"574faad5-0a82-4ff3-b0a8-5390bfd3dc27\" (UID: \"574faad5-0a82-4ff3-b0a8-5390bfd3dc27\") " Jan 29 12:12:09 crc kubenswrapper[4753]: I0129 12:12:09.050184 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/574faad5-0a82-4ff3-b0a8-5390bfd3dc27-utilities\") pod \"574faad5-0a82-4ff3-b0a8-5390bfd3dc27\" (UID: \"574faad5-0a82-4ff3-b0a8-5390bfd3dc27\") " Jan 29 12:12:09 crc kubenswrapper[4753]: I0129 12:12:09.050328 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/574faad5-0a82-4ff3-b0a8-5390bfd3dc27-catalog-content\") pod \"574faad5-0a82-4ff3-b0a8-5390bfd3dc27\" (UID: \"574faad5-0a82-4ff3-b0a8-5390bfd3dc27\") " Jan 29 12:12:09 crc kubenswrapper[4753]: I0129 12:12:09.051093 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/574faad5-0a82-4ff3-b0a8-5390bfd3dc27-utilities" (OuterVolumeSpecName: "utilities") pod "574faad5-0a82-4ff3-b0a8-5390bfd3dc27" (UID: "574faad5-0a82-4ff3-b0a8-5390bfd3dc27"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:12:09 crc kubenswrapper[4753]: I0129 12:12:09.055468 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/574faad5-0a82-4ff3-b0a8-5390bfd3dc27-kube-api-access-62gc5" (OuterVolumeSpecName: "kube-api-access-62gc5") pod "574faad5-0a82-4ff3-b0a8-5390bfd3dc27" (UID: "574faad5-0a82-4ff3-b0a8-5390bfd3dc27"). InnerVolumeSpecName "kube-api-access-62gc5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:12:09 crc kubenswrapper[4753]: I0129 12:12:09.100542 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/574faad5-0a82-4ff3-b0a8-5390bfd3dc27-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "574faad5-0a82-4ff3-b0a8-5390bfd3dc27" (UID: "574faad5-0a82-4ff3-b0a8-5390bfd3dc27"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:12:09 crc kubenswrapper[4753]: I0129 12:12:09.154347 4753 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/574faad5-0a82-4ff3-b0a8-5390bfd3dc27-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 12:12:09 crc kubenswrapper[4753]: I0129 12:12:09.154385 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-62gc5\" (UniqueName: \"kubernetes.io/projected/574faad5-0a82-4ff3-b0a8-5390bfd3dc27-kube-api-access-62gc5\") on node \"crc\" DevicePath \"\"" Jan 29 12:12:09 crc kubenswrapper[4753]: I0129 12:12:09.154399 4753 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/574faad5-0a82-4ff3-b0a8-5390bfd3dc27-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 12:12:09 crc kubenswrapper[4753]: I0129 12:12:09.154849 4753 generic.go:334] "Generic (PLEG): container finished" podID="574faad5-0a82-4ff3-b0a8-5390bfd3dc27" containerID="ac2f0ade620830ba2b7f7881563fefad779b1fb0b8490876d6338c021f3e14f5" exitCode=0 Jan 29 12:12:09 crc kubenswrapper[4753]: I0129 12:12:09.155037 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5clxb" Jan 29 12:12:09 crc kubenswrapper[4753]: I0129 12:12:09.159942 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5clxb" event={"ID":"574faad5-0a82-4ff3-b0a8-5390bfd3dc27","Type":"ContainerDied","Data":"ac2f0ade620830ba2b7f7881563fefad779b1fb0b8490876d6338c021f3e14f5"} Jan 29 12:12:09 crc kubenswrapper[4753]: I0129 12:12:09.160046 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5clxb" event={"ID":"574faad5-0a82-4ff3-b0a8-5390bfd3dc27","Type":"ContainerDied","Data":"baf50df47fdfe9c1a3c918397fb285aa5e8261643498e8b8b9634d887abe8076"} Jan 29 12:12:09 crc kubenswrapper[4753]: I0129 12:12:09.160074 4753 scope.go:117] "RemoveContainer" containerID="ac2f0ade620830ba2b7f7881563fefad779b1fb0b8490876d6338c021f3e14f5" Jan 29 12:12:09 crc kubenswrapper[4753]: I0129 12:12:09.172058 4753 generic.go:334] "Generic (PLEG): container finished" podID="ae52688b-6f7a-441f-927b-ab547b7ce44f" containerID="52f05030c12ab9e8660e14b119e79d18cd261ba1485966914b56eb53990558c5" exitCode=0 Jan 29 12:12:09 crc kubenswrapper[4753]: I0129 12:12:09.172172 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bsxpd" event={"ID":"ae52688b-6f7a-441f-927b-ab547b7ce44f","Type":"ContainerDied","Data":"52f05030c12ab9e8660e14b119e79d18cd261ba1485966914b56eb53990558c5"} Jan 29 12:12:09 crc kubenswrapper[4753]: I0129 12:12:09.187503 4753 scope.go:117] "RemoveContainer" containerID="709fb49800c42dec000456fee3a98f0284de9438dd16d8dc91bfa0915053f2bc" Jan 29 12:12:09 crc kubenswrapper[4753]: I0129 12:12:09.195346 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-5clxb"] Jan 29 12:12:09 crc kubenswrapper[4753]: I0129 12:12:09.204750 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-5clxb"] Jan 29 12:12:09 crc kubenswrapper[4753]: I0129 12:12:09.234521 4753 scope.go:117] "RemoveContainer" containerID="e0c6201cc9ca175420256e1854850c196a5f5fc6725baaf71ea229bb97deb642" Jan 29 12:12:09 crc kubenswrapper[4753]: I0129 12:12:09.260059 4753 scope.go:117] "RemoveContainer" 
containerID="ac2f0ade620830ba2b7f7881563fefad779b1fb0b8490876d6338c021f3e14f5" Jan 29 12:12:09 crc kubenswrapper[4753]: E0129 12:12:09.264760 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ac2f0ade620830ba2b7f7881563fefad779b1fb0b8490876d6338c021f3e14f5\": container with ID starting with ac2f0ade620830ba2b7f7881563fefad779b1fb0b8490876d6338c021f3e14f5 not found: ID does not exist" containerID="ac2f0ade620830ba2b7f7881563fefad779b1fb0b8490876d6338c021f3e14f5" Jan 29 12:12:09 crc kubenswrapper[4753]: I0129 12:12:09.264841 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ac2f0ade620830ba2b7f7881563fefad779b1fb0b8490876d6338c021f3e14f5"} err="failed to get container status \"ac2f0ade620830ba2b7f7881563fefad779b1fb0b8490876d6338c021f3e14f5\": rpc error: code = NotFound desc = could not find container \"ac2f0ade620830ba2b7f7881563fefad779b1fb0b8490876d6338c021f3e14f5\": container with ID starting with ac2f0ade620830ba2b7f7881563fefad779b1fb0b8490876d6338c021f3e14f5 not found: ID does not exist" Jan 29 12:12:09 crc kubenswrapper[4753]: I0129 12:12:09.264900 4753 scope.go:117] "RemoveContainer" containerID="709fb49800c42dec000456fee3a98f0284de9438dd16d8dc91bfa0915053f2bc" Jan 29 12:12:09 crc kubenswrapper[4753]: E0129 12:12:09.265520 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"709fb49800c42dec000456fee3a98f0284de9438dd16d8dc91bfa0915053f2bc\": container with ID starting with 709fb49800c42dec000456fee3a98f0284de9438dd16d8dc91bfa0915053f2bc not found: ID does not exist" containerID="709fb49800c42dec000456fee3a98f0284de9438dd16d8dc91bfa0915053f2bc" Jan 29 12:12:09 crc kubenswrapper[4753]: I0129 12:12:09.265544 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"709fb49800c42dec000456fee3a98f0284de9438dd16d8dc91bfa0915053f2bc"} err="failed to get container status \"709fb49800c42dec000456fee3a98f0284de9438dd16d8dc91bfa0915053f2bc\": rpc error: code = NotFound desc = could not find container \"709fb49800c42dec000456fee3a98f0284de9438dd16d8dc91bfa0915053f2bc\": container with ID starting with 709fb49800c42dec000456fee3a98f0284de9438dd16d8dc91bfa0915053f2bc not found: ID does not exist" Jan 29 12:12:09 crc kubenswrapper[4753]: I0129 12:12:09.265564 4753 scope.go:117] "RemoveContainer" containerID="e0c6201cc9ca175420256e1854850c196a5f5fc6725baaf71ea229bb97deb642" Jan 29 12:12:09 crc kubenswrapper[4753]: E0129 12:12:09.265862 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e0c6201cc9ca175420256e1854850c196a5f5fc6725baaf71ea229bb97deb642\": container with ID starting with e0c6201cc9ca175420256e1854850c196a5f5fc6725baaf71ea229bb97deb642 not found: ID does not exist" containerID="e0c6201cc9ca175420256e1854850c196a5f5fc6725baaf71ea229bb97deb642" Jan 29 12:12:09 crc kubenswrapper[4753]: I0129 12:12:09.265899 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e0c6201cc9ca175420256e1854850c196a5f5fc6725baaf71ea229bb97deb642"} err="failed to get container status \"e0c6201cc9ca175420256e1854850c196a5f5fc6725baaf71ea229bb97deb642\": rpc error: code = NotFound desc = could not find container \"e0c6201cc9ca175420256e1854850c196a5f5fc6725baaf71ea229bb97deb642\": container with ID starting with 
e0c6201cc9ca175420256e1854850c196a5f5fc6725baaf71ea229bb97deb642 not found: ID does not exist" Jan 29 12:12:09 crc kubenswrapper[4753]: I0129 12:12:09.619387 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bsxpd" Jan 29 12:12:09 crc kubenswrapper[4753]: I0129 12:12:09.807375 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae52688b-6f7a-441f-927b-ab547b7ce44f-catalog-content\") pod \"ae52688b-6f7a-441f-927b-ab547b7ce44f\" (UID: \"ae52688b-6f7a-441f-927b-ab547b7ce44f\") " Jan 29 12:12:09 crc kubenswrapper[4753]: I0129 12:12:09.807653 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkq7z\" (UniqueName: \"kubernetes.io/projected/ae52688b-6f7a-441f-927b-ab547b7ce44f-kube-api-access-zkq7z\") pod \"ae52688b-6f7a-441f-927b-ab547b7ce44f\" (UID: \"ae52688b-6f7a-441f-927b-ab547b7ce44f\") " Jan 29 12:12:09 crc kubenswrapper[4753]: I0129 12:12:09.807692 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae52688b-6f7a-441f-927b-ab547b7ce44f-utilities\") pod \"ae52688b-6f7a-441f-927b-ab547b7ce44f\" (UID: \"ae52688b-6f7a-441f-927b-ab547b7ce44f\") " Jan 29 12:12:09 crc kubenswrapper[4753]: I0129 12:12:09.809061 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ae52688b-6f7a-441f-927b-ab547b7ce44f-utilities" (OuterVolumeSpecName: "utilities") pod "ae52688b-6f7a-441f-927b-ab547b7ce44f" (UID: "ae52688b-6f7a-441f-927b-ab547b7ce44f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:12:09 crc kubenswrapper[4753]: I0129 12:12:09.813879 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ae52688b-6f7a-441f-927b-ab547b7ce44f-kube-api-access-zkq7z" (OuterVolumeSpecName: "kube-api-access-zkq7z") pod "ae52688b-6f7a-441f-927b-ab547b7ce44f" (UID: "ae52688b-6f7a-441f-927b-ab547b7ce44f"). InnerVolumeSpecName "kube-api-access-zkq7z". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:12:09 crc kubenswrapper[4753]: I0129 12:12:09.862605 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ae52688b-6f7a-441f-927b-ab547b7ce44f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ae52688b-6f7a-441f-927b-ab547b7ce44f" (UID: "ae52688b-6f7a-441f-927b-ab547b7ce44f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:12:09 crc kubenswrapper[4753]: I0129 12:12:09.898273 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="574faad5-0a82-4ff3-b0a8-5390bfd3dc27" path="/var/lib/kubelet/pods/574faad5-0a82-4ff3-b0a8-5390bfd3dc27/volumes" Jan 29 12:12:09 crc kubenswrapper[4753]: I0129 12:12:09.909465 4753 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae52688b-6f7a-441f-927b-ab547b7ce44f-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 12:12:09 crc kubenswrapper[4753]: I0129 12:12:09.909512 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkq7z\" (UniqueName: \"kubernetes.io/projected/ae52688b-6f7a-441f-927b-ab547b7ce44f-kube-api-access-zkq7z\") on node \"crc\" DevicePath \"\"" Jan 29 12:12:09 crc kubenswrapper[4753]: I0129 12:12:09.909536 4753 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae52688b-6f7a-441f-927b-ab547b7ce44f-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 12:12:10 crc kubenswrapper[4753]: I0129 12:12:10.190458 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bsxpd" event={"ID":"ae52688b-6f7a-441f-927b-ab547b7ce44f","Type":"ContainerDied","Data":"cba8692c5f0bccf235c4375147b2e3d5b3415e156cfb7de056059dfbafba39af"} Jan 29 12:12:10 crc kubenswrapper[4753]: I0129 12:12:10.190533 4753 scope.go:117] "RemoveContainer" containerID="52f05030c12ab9e8660e14b119e79d18cd261ba1485966914b56eb53990558c5" Jan 29 12:12:10 crc kubenswrapper[4753]: I0129 12:12:10.190531 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bsxpd" Jan 29 12:12:10 crc kubenswrapper[4753]: I0129 12:12:10.215371 4753 scope.go:117] "RemoveContainer" containerID="5432dfc5e349fdc2fce3a60c429c9129f30518e3f8c21a676dc46b7d8cc1879c" Jan 29 12:12:10 crc kubenswrapper[4753]: I0129 12:12:10.216795 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bsxpd"] Jan 29 12:12:10 crc kubenswrapper[4753]: I0129 12:12:10.225946 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-bsxpd"] Jan 29 12:12:10 crc kubenswrapper[4753]: I0129 12:12:10.241006 4753 scope.go:117] "RemoveContainer" containerID="2adff0c5060cee3a66c1e74eec771087277951d3650b517c6a798dca255f979e" Jan 29 12:12:10 crc kubenswrapper[4753]: I0129 12:12:10.272855 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-z6xk4"] Jan 29 12:12:10 crc kubenswrapper[4753]: I0129 12:12:10.273135 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-z6xk4" podUID="d0809beb-ae87-4bf7-aa2d-20dbe819c3cc" containerName="registry-server" containerID="cri-o://a3cd9f298e84306e0b3dc8b1b3f5f7d784c3b06714ab49dc7b77251639ad2000" gracePeriod=2 Jan 29 12:12:10 crc kubenswrapper[4753]: I0129 12:12:10.766731 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-z6xk4" Jan 29 12:12:10 crc kubenswrapper[4753]: I0129 12:12:10.931170 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d0809beb-ae87-4bf7-aa2d-20dbe819c3cc-catalog-content\") pod \"d0809beb-ae87-4bf7-aa2d-20dbe819c3cc\" (UID: \"d0809beb-ae87-4bf7-aa2d-20dbe819c3cc\") " Jan 29 12:12:10 crc kubenswrapper[4753]: I0129 12:12:10.931260 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d0809beb-ae87-4bf7-aa2d-20dbe819c3cc-utilities\") pod \"d0809beb-ae87-4bf7-aa2d-20dbe819c3cc\" (UID: \"d0809beb-ae87-4bf7-aa2d-20dbe819c3cc\") " Jan 29 12:12:10 crc kubenswrapper[4753]: I0129 12:12:10.931393 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gmml4\" (UniqueName: \"kubernetes.io/projected/d0809beb-ae87-4bf7-aa2d-20dbe819c3cc-kube-api-access-gmml4\") pod \"d0809beb-ae87-4bf7-aa2d-20dbe819c3cc\" (UID: \"d0809beb-ae87-4bf7-aa2d-20dbe819c3cc\") " Jan 29 12:12:10 crc kubenswrapper[4753]: I0129 12:12:10.932471 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d0809beb-ae87-4bf7-aa2d-20dbe819c3cc-utilities" (OuterVolumeSpecName: "utilities") pod "d0809beb-ae87-4bf7-aa2d-20dbe819c3cc" (UID: "d0809beb-ae87-4bf7-aa2d-20dbe819c3cc"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:12:10 crc kubenswrapper[4753]: I0129 12:12:10.939184 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d0809beb-ae87-4bf7-aa2d-20dbe819c3cc-kube-api-access-gmml4" (OuterVolumeSpecName: "kube-api-access-gmml4") pod "d0809beb-ae87-4bf7-aa2d-20dbe819c3cc" (UID: "d0809beb-ae87-4bf7-aa2d-20dbe819c3cc"). InnerVolumeSpecName "kube-api-access-gmml4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:12:11 crc kubenswrapper[4753]: I0129 12:12:11.033381 4753 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d0809beb-ae87-4bf7-aa2d-20dbe819c3cc-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 12:12:11 crc kubenswrapper[4753]: I0129 12:12:11.033423 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gmml4\" (UniqueName: \"kubernetes.io/projected/d0809beb-ae87-4bf7-aa2d-20dbe819c3cc-kube-api-access-gmml4\") on node \"crc\" DevicePath \"\"" Jan 29 12:12:11 crc kubenswrapper[4753]: I0129 12:12:11.275416 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d0809beb-ae87-4bf7-aa2d-20dbe819c3cc-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d0809beb-ae87-4bf7-aa2d-20dbe819c3cc" (UID: "d0809beb-ae87-4bf7-aa2d-20dbe819c3cc"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:12:11 crc kubenswrapper[4753]: I0129 12:12:11.278786 4753 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d0809beb-ae87-4bf7-aa2d-20dbe819c3cc-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 12:12:11 crc kubenswrapper[4753]: I0129 12:12:11.293045 4753 generic.go:334] "Generic (PLEG): container finished" podID="d0809beb-ae87-4bf7-aa2d-20dbe819c3cc" containerID="a3cd9f298e84306e0b3dc8b1b3f5f7d784c3b06714ab49dc7b77251639ad2000" exitCode=0 Jan 29 12:12:11 crc kubenswrapper[4753]: I0129 12:12:11.293146 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-z6xk4" event={"ID":"d0809beb-ae87-4bf7-aa2d-20dbe819c3cc","Type":"ContainerDied","Data":"a3cd9f298e84306e0b3dc8b1b3f5f7d784c3b06714ab49dc7b77251639ad2000"} Jan 29 12:12:11 crc kubenswrapper[4753]: I0129 12:12:11.293199 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-z6xk4" event={"ID":"d0809beb-ae87-4bf7-aa2d-20dbe819c3cc","Type":"ContainerDied","Data":"cfe310ce50ed328fbce042aaec73f6fc4aefee5317448fb747418fd8b4431eaf"} Jan 29 12:12:11 crc kubenswrapper[4753]: I0129 12:12:11.293220 4753 scope.go:117] "RemoveContainer" containerID="a3cd9f298e84306e0b3dc8b1b3f5f7d784c3b06714ab49dc7b77251639ad2000" Jan 29 12:12:11 crc kubenswrapper[4753]: I0129 12:12:11.293391 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-z6xk4" Jan 29 12:12:11 crc kubenswrapper[4753]: I0129 12:12:11.331093 4753 scope.go:117] "RemoveContainer" containerID="e7d2dbf70e86b9f470e3feb4e7b019ea755da1a818acc3e33e140b506b174c6e" Jan 29 12:12:11 crc kubenswrapper[4753]: I0129 12:12:11.356321 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-z6xk4"] Jan 29 12:12:11 crc kubenswrapper[4753]: I0129 12:12:11.389212 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-z6xk4"] Jan 29 12:12:11 crc kubenswrapper[4753]: I0129 12:12:11.408882 4753 scope.go:117] "RemoveContainer" containerID="05fa1781d9365a5911b342bc5dc122c3b11fc628bfd851f08cca687d70e73d40" Jan 29 12:12:11 crc kubenswrapper[4753]: I0129 12:12:11.438637 4753 scope.go:117] "RemoveContainer" containerID="a3cd9f298e84306e0b3dc8b1b3f5f7d784c3b06714ab49dc7b77251639ad2000" Jan 29 12:12:11 crc kubenswrapper[4753]: E0129 12:12:11.444374 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a3cd9f298e84306e0b3dc8b1b3f5f7d784c3b06714ab49dc7b77251639ad2000\": container with ID starting with a3cd9f298e84306e0b3dc8b1b3f5f7d784c3b06714ab49dc7b77251639ad2000 not found: ID does not exist" containerID="a3cd9f298e84306e0b3dc8b1b3f5f7d784c3b06714ab49dc7b77251639ad2000" Jan 29 12:12:11 crc kubenswrapper[4753]: I0129 12:12:11.444481 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a3cd9f298e84306e0b3dc8b1b3f5f7d784c3b06714ab49dc7b77251639ad2000"} err="failed to get container status \"a3cd9f298e84306e0b3dc8b1b3f5f7d784c3b06714ab49dc7b77251639ad2000\": rpc error: code = NotFound desc = could not find container \"a3cd9f298e84306e0b3dc8b1b3f5f7d784c3b06714ab49dc7b77251639ad2000\": container with ID starting with a3cd9f298e84306e0b3dc8b1b3f5f7d784c3b06714ab49dc7b77251639ad2000 not found: ID does not exist" Jan 29 12:12:11 crc 
kubenswrapper[4753]: I0129 12:12:11.444547 4753 scope.go:117] "RemoveContainer" containerID="e7d2dbf70e86b9f470e3feb4e7b019ea755da1a818acc3e33e140b506b174c6e" Jan 29 12:12:11 crc kubenswrapper[4753]: E0129 12:12:11.445154 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e7d2dbf70e86b9f470e3feb4e7b019ea755da1a818acc3e33e140b506b174c6e\": container with ID starting with e7d2dbf70e86b9f470e3feb4e7b019ea755da1a818acc3e33e140b506b174c6e not found: ID does not exist" containerID="e7d2dbf70e86b9f470e3feb4e7b019ea755da1a818acc3e33e140b506b174c6e" Jan 29 12:12:11 crc kubenswrapper[4753]: I0129 12:12:11.445197 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e7d2dbf70e86b9f470e3feb4e7b019ea755da1a818acc3e33e140b506b174c6e"} err="failed to get container status \"e7d2dbf70e86b9f470e3feb4e7b019ea755da1a818acc3e33e140b506b174c6e\": rpc error: code = NotFound desc = could not find container \"e7d2dbf70e86b9f470e3feb4e7b019ea755da1a818acc3e33e140b506b174c6e\": container with ID starting with e7d2dbf70e86b9f470e3feb4e7b019ea755da1a818acc3e33e140b506b174c6e not found: ID does not exist" Jan 29 12:12:11 crc kubenswrapper[4753]: I0129 12:12:11.445247 4753 scope.go:117] "RemoveContainer" containerID="05fa1781d9365a5911b342bc5dc122c3b11fc628bfd851f08cca687d70e73d40" Jan 29 12:12:11 crc kubenswrapper[4753]: E0129 12:12:11.445918 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"05fa1781d9365a5911b342bc5dc122c3b11fc628bfd851f08cca687d70e73d40\": container with ID starting with 05fa1781d9365a5911b342bc5dc122c3b11fc628bfd851f08cca687d70e73d40 not found: ID does not exist" containerID="05fa1781d9365a5911b342bc5dc122c3b11fc628bfd851f08cca687d70e73d40" Jan 29 12:12:11 crc kubenswrapper[4753]: I0129 12:12:11.445946 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"05fa1781d9365a5911b342bc5dc122c3b11fc628bfd851f08cca687d70e73d40"} err="failed to get container status \"05fa1781d9365a5911b342bc5dc122c3b11fc628bfd851f08cca687d70e73d40\": rpc error: code = NotFound desc = could not find container \"05fa1781d9365a5911b342bc5dc122c3b11fc628bfd851f08cca687d70e73d40\": container with ID starting with 05fa1781d9365a5911b342bc5dc122c3b11fc628bfd851f08cca687d70e73d40 not found: ID does not exist" Jan 29 12:12:11 crc kubenswrapper[4753]: I0129 12:12:11.897416 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ae52688b-6f7a-441f-927b-ab547b7ce44f" path="/var/lib/kubelet/pods/ae52688b-6f7a-441f-927b-ab547b7ce44f/volumes" Jan 29 12:12:11 crc kubenswrapper[4753]: I0129 12:12:11.898626 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d0809beb-ae87-4bf7-aa2d-20dbe819c3cc" path="/var/lib/kubelet/pods/d0809beb-ae87-4bf7-aa2d-20dbe819c3cc/volumes" Jan 29 12:12:13 crc kubenswrapper[4753]: I0129 12:12:13.506880 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-zdhjn"] Jan 29 12:12:32 crc kubenswrapper[4753]: I0129 12:12:32.083053 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-844bbdf588-qdt2h"] Jan 29 12:12:32 crc kubenswrapper[4753]: I0129 12:12:32.084008 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-844bbdf588-qdt2h" 
podUID="10e48cb3-f357-49bd-8eee-a576c8814487" containerName="controller-manager" containerID="cri-o://252f4e2e3424b357db5c66af56852ed8dd6c17586bbf3fb35216f8cf1954cab3" gracePeriod=30 Jan 29 12:12:32 crc kubenswrapper[4753]: I0129 12:12:32.109004 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-c8bcc84b6-vdbzf"] Jan 29 12:12:32 crc kubenswrapper[4753]: I0129 12:12:32.109278 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-c8bcc84b6-vdbzf" podUID="6958c11f-6d19-4cd0-be14-e796f1df37ee" containerName="route-controller-manager" containerID="cri-o://4c75a76ce4e80a4a8fbd77a8e8cd5947963eca8a13120cf1b19a46306c9d5376" gracePeriod=30 Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.112868 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-844bbdf588-qdt2h" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.116014 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-c8bcc84b6-vdbzf" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.151163 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-589f98ccbb-99gvd"] Jan 29 12:12:33 crc kubenswrapper[4753]: E0129 12:12:33.151503 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="318aa5db-6b19-4efe-8c5d-00fbb4a84b13" containerName="registry-server" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.151522 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="318aa5db-6b19-4efe-8c5d-00fbb4a84b13" containerName="registry-server" Jan 29 12:12:33 crc kubenswrapper[4753]: E0129 12:12:33.151533 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0809beb-ae87-4bf7-aa2d-20dbe819c3cc" containerName="extract-utilities" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.151540 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0809beb-ae87-4bf7-aa2d-20dbe819c3cc" containerName="extract-utilities" Jan 29 12:12:33 crc kubenswrapper[4753]: E0129 12:12:33.151550 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="318aa5db-6b19-4efe-8c5d-00fbb4a84b13" containerName="extract-utilities" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.151561 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="318aa5db-6b19-4efe-8c5d-00fbb4a84b13" containerName="extract-utilities" Jan 29 12:12:33 crc kubenswrapper[4753]: E0129 12:12:33.151575 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="318aa5db-6b19-4efe-8c5d-00fbb4a84b13" containerName="extract-content" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.151583 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="318aa5db-6b19-4efe-8c5d-00fbb4a84b13" containerName="extract-content" Jan 29 12:12:33 crc kubenswrapper[4753]: E0129 12:12:33.151592 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10e48cb3-f357-49bd-8eee-a576c8814487" containerName="controller-manager" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.151601 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="10e48cb3-f357-49bd-8eee-a576c8814487" containerName="controller-manager" Jan 29 12:12:33 crc kubenswrapper[4753]: E0129 12:12:33.151613 4753 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="6958c11f-6d19-4cd0-be14-e796f1df37ee" containerName="route-controller-manager" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.151621 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="6958c11f-6d19-4cd0-be14-e796f1df37ee" containerName="route-controller-manager" Jan 29 12:12:33 crc kubenswrapper[4753]: E0129 12:12:33.151632 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae52688b-6f7a-441f-927b-ab547b7ce44f" containerName="extract-content" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.151638 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae52688b-6f7a-441f-927b-ab547b7ce44f" containerName="extract-content" Jan 29 12:12:33 crc kubenswrapper[4753]: E0129 12:12:33.151647 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="574faad5-0a82-4ff3-b0a8-5390bfd3dc27" containerName="extract-utilities" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.151654 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="574faad5-0a82-4ff3-b0a8-5390bfd3dc27" containerName="extract-utilities" Jan 29 12:12:33 crc kubenswrapper[4753]: E0129 12:12:33.151667 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae52688b-6f7a-441f-927b-ab547b7ce44f" containerName="extract-utilities" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.151673 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae52688b-6f7a-441f-927b-ab547b7ce44f" containerName="extract-utilities" Jan 29 12:12:33 crc kubenswrapper[4753]: E0129 12:12:33.151685 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae52688b-6f7a-441f-927b-ab547b7ce44f" containerName="registry-server" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.151692 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae52688b-6f7a-441f-927b-ab547b7ce44f" containerName="registry-server" Jan 29 12:12:33 crc kubenswrapper[4753]: E0129 12:12:33.151703 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0809beb-ae87-4bf7-aa2d-20dbe819c3cc" containerName="registry-server" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.151711 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0809beb-ae87-4bf7-aa2d-20dbe819c3cc" containerName="registry-server" Jan 29 12:12:33 crc kubenswrapper[4753]: E0129 12:12:33.151722 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="574faad5-0a82-4ff3-b0a8-5390bfd3dc27" containerName="registry-server" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.151729 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="574faad5-0a82-4ff3-b0a8-5390bfd3dc27" containerName="registry-server" Jan 29 12:12:33 crc kubenswrapper[4753]: E0129 12:12:33.151742 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0809beb-ae87-4bf7-aa2d-20dbe819c3cc" containerName="extract-content" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.151750 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0809beb-ae87-4bf7-aa2d-20dbe819c3cc" containerName="extract-content" Jan 29 12:12:33 crc kubenswrapper[4753]: E0129 12:12:33.151765 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="574faad5-0a82-4ff3-b0a8-5390bfd3dc27" containerName="extract-content" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.151772 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="574faad5-0a82-4ff3-b0a8-5390bfd3dc27" containerName="extract-content" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.151873 4753 
memory_manager.go:354] "RemoveStaleState removing state" podUID="6958c11f-6d19-4cd0-be14-e796f1df37ee" containerName="route-controller-manager" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.151881 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="10e48cb3-f357-49bd-8eee-a576c8814487" containerName="controller-manager" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.151891 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="318aa5db-6b19-4efe-8c5d-00fbb4a84b13" containerName="registry-server" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.151906 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="574faad5-0a82-4ff3-b0a8-5390bfd3dc27" containerName="registry-server" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.151914 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="ae52688b-6f7a-441f-927b-ab547b7ce44f" containerName="registry-server" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.151921 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="d0809beb-ae87-4bf7-aa2d-20dbe819c3cc" containerName="registry-server" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.152306 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-589f98ccbb-99gvd" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.158057 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c3488187-a863-4211-be1f-edd74f9a3771-config\") pod \"controller-manager-589f98ccbb-99gvd\" (UID: \"c3488187-a863-4211-be1f-edd74f9a3771\") " pod="openshift-controller-manager/controller-manager-589f98ccbb-99gvd" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.160611 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2hr5z\" (UniqueName: \"kubernetes.io/projected/c3488187-a863-4211-be1f-edd74f9a3771-kube-api-access-2hr5z\") pod \"controller-manager-589f98ccbb-99gvd\" (UID: \"c3488187-a863-4211-be1f-edd74f9a3771\") " pod="openshift-controller-manager/controller-manager-589f98ccbb-99gvd" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.160666 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c3488187-a863-4211-be1f-edd74f9a3771-proxy-ca-bundles\") pod \"controller-manager-589f98ccbb-99gvd\" (UID: \"c3488187-a863-4211-be1f-edd74f9a3771\") " pod="openshift-controller-manager/controller-manager-589f98ccbb-99gvd" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.160746 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c3488187-a863-4211-be1f-edd74f9a3771-serving-cert\") pod \"controller-manager-589f98ccbb-99gvd\" (UID: \"c3488187-a863-4211-be1f-edd74f9a3771\") " pod="openshift-controller-manager/controller-manager-589f98ccbb-99gvd" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.160804 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c3488187-a863-4211-be1f-edd74f9a3771-client-ca\") pod \"controller-manager-589f98ccbb-99gvd\" (UID: \"c3488187-a863-4211-be1f-edd74f9a3771\") " 
pod="openshift-controller-manager/controller-manager-589f98ccbb-99gvd" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.180752 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-589f98ccbb-99gvd"] Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.262744 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6958c11f-6d19-4cd0-be14-e796f1df37ee-serving-cert\") pod \"6958c11f-6d19-4cd0-be14-e796f1df37ee\" (UID: \"6958c11f-6d19-4cd0-be14-e796f1df37ee\") " Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.262849 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/10e48cb3-f357-49bd-8eee-a576c8814487-serving-cert\") pod \"10e48cb3-f357-49bd-8eee-a576c8814487\" (UID: \"10e48cb3-f357-49bd-8eee-a576c8814487\") " Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.262914 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/10e48cb3-f357-49bd-8eee-a576c8814487-proxy-ca-bundles\") pod \"10e48cb3-f357-49bd-8eee-a576c8814487\" (UID: \"10e48cb3-f357-49bd-8eee-a576c8814487\") " Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.262989 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jn78n\" (UniqueName: \"kubernetes.io/projected/6958c11f-6d19-4cd0-be14-e796f1df37ee-kube-api-access-jn78n\") pod \"6958c11f-6d19-4cd0-be14-e796f1df37ee\" (UID: \"6958c11f-6d19-4cd0-be14-e796f1df37ee\") " Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.263029 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/10e48cb3-f357-49bd-8eee-a576c8814487-config\") pod \"10e48cb3-f357-49bd-8eee-a576c8814487\" (UID: \"10e48cb3-f357-49bd-8eee-a576c8814487\") " Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.264142 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/10e48cb3-f357-49bd-8eee-a576c8814487-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "10e48cb3-f357-49bd-8eee-a576c8814487" (UID: "10e48cb3-f357-49bd-8eee-a576c8814487"). InnerVolumeSpecName "proxy-ca-bundles". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.264295 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/10e48cb3-f357-49bd-8eee-a576c8814487-client-ca\") pod \"10e48cb3-f357-49bd-8eee-a576c8814487\" (UID: \"10e48cb3-f357-49bd-8eee-a576c8814487\") " Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.264507 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8b2qn\" (UniqueName: \"kubernetes.io/projected/10e48cb3-f357-49bd-8eee-a576c8814487-kube-api-access-8b2qn\") pod \"10e48cb3-f357-49bd-8eee-a576c8814487\" (UID: \"10e48cb3-f357-49bd-8eee-a576c8814487\") " Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.264529 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6958c11f-6d19-4cd0-be14-e796f1df37ee-client-ca\") pod \"6958c11f-6d19-4cd0-be14-e796f1df37ee\" (UID: \"6958c11f-6d19-4cd0-be14-e796f1df37ee\") " Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.264581 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6958c11f-6d19-4cd0-be14-e796f1df37ee-config\") pod \"6958c11f-6d19-4cd0-be14-e796f1df37ee\" (UID: \"6958c11f-6d19-4cd0-be14-e796f1df37ee\") " Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.264788 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2hr5z\" (UniqueName: \"kubernetes.io/projected/c3488187-a863-4211-be1f-edd74f9a3771-kube-api-access-2hr5z\") pod \"controller-manager-589f98ccbb-99gvd\" (UID: \"c3488187-a863-4211-be1f-edd74f9a3771\") " pod="openshift-controller-manager/controller-manager-589f98ccbb-99gvd" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.264828 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c3488187-a863-4211-be1f-edd74f9a3771-proxy-ca-bundles\") pod \"controller-manager-589f98ccbb-99gvd\" (UID: \"c3488187-a863-4211-be1f-edd74f9a3771\") " pod="openshift-controller-manager/controller-manager-589f98ccbb-99gvd" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.264876 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/10e48cb3-f357-49bd-8eee-a576c8814487-config" (OuterVolumeSpecName: "config") pod "10e48cb3-f357-49bd-8eee-a576c8814487" (UID: "10e48cb3-f357-49bd-8eee-a576c8814487"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.264899 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c3488187-a863-4211-be1f-edd74f9a3771-serving-cert\") pod \"controller-manager-589f98ccbb-99gvd\" (UID: \"c3488187-a863-4211-be1f-edd74f9a3771\") " pod="openshift-controller-manager/controller-manager-589f98ccbb-99gvd" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.264955 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c3488187-a863-4211-be1f-edd74f9a3771-client-ca\") pod \"controller-manager-589f98ccbb-99gvd\" (UID: \"c3488187-a863-4211-be1f-edd74f9a3771\") " pod="openshift-controller-manager/controller-manager-589f98ccbb-99gvd" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.265063 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c3488187-a863-4211-be1f-edd74f9a3771-config\") pod \"controller-manager-589f98ccbb-99gvd\" (UID: \"c3488187-a863-4211-be1f-edd74f9a3771\") " pod="openshift-controller-manager/controller-manager-589f98ccbb-99gvd" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.265162 4753 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/10e48cb3-f357-49bd-8eee-a576c8814487-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.265204 4753 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/10e48cb3-f357-49bd-8eee-a576c8814487-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.266071 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6958c11f-6d19-4cd0-be14-e796f1df37ee-config" (OuterVolumeSpecName: "config") pod "6958c11f-6d19-4cd0-be14-e796f1df37ee" (UID: "6958c11f-6d19-4cd0-be14-e796f1df37ee"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.266330 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6958c11f-6d19-4cd0-be14-e796f1df37ee-client-ca" (OuterVolumeSpecName: "client-ca") pod "6958c11f-6d19-4cd0-be14-e796f1df37ee" (UID: "6958c11f-6d19-4cd0-be14-e796f1df37ee"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.266953 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c3488187-a863-4211-be1f-edd74f9a3771-config\") pod \"controller-manager-589f98ccbb-99gvd\" (UID: \"c3488187-a863-4211-be1f-edd74f9a3771\") " pod="openshift-controller-manager/controller-manager-589f98ccbb-99gvd" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.267977 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c3488187-a863-4211-be1f-edd74f9a3771-client-ca\") pod \"controller-manager-589f98ccbb-99gvd\" (UID: \"c3488187-a863-4211-be1f-edd74f9a3771\") " pod="openshift-controller-manager/controller-manager-589f98ccbb-99gvd" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.268604 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c3488187-a863-4211-be1f-edd74f9a3771-proxy-ca-bundles\") pod \"controller-manager-589f98ccbb-99gvd\" (UID: \"c3488187-a863-4211-be1f-edd74f9a3771\") " pod="openshift-controller-manager/controller-manager-589f98ccbb-99gvd" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.269082 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/10e48cb3-f357-49bd-8eee-a576c8814487-client-ca" (OuterVolumeSpecName: "client-ca") pod "10e48cb3-f357-49bd-8eee-a576c8814487" (UID: "10e48cb3-f357-49bd-8eee-a576c8814487"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.270209 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/10e48cb3-f357-49bd-8eee-a576c8814487-kube-api-access-8b2qn" (OuterVolumeSpecName: "kube-api-access-8b2qn") pod "10e48cb3-f357-49bd-8eee-a576c8814487" (UID: "10e48cb3-f357-49bd-8eee-a576c8814487"). InnerVolumeSpecName "kube-api-access-8b2qn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.270300 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10e48cb3-f357-49bd-8eee-a576c8814487-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "10e48cb3-f357-49bd-8eee-a576c8814487" (UID: "10e48cb3-f357-49bd-8eee-a576c8814487"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.270852 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6958c11f-6d19-4cd0-be14-e796f1df37ee-kube-api-access-jn78n" (OuterVolumeSpecName: "kube-api-access-jn78n") pod "6958c11f-6d19-4cd0-be14-e796f1df37ee" (UID: "6958c11f-6d19-4cd0-be14-e796f1df37ee"). InnerVolumeSpecName "kube-api-access-jn78n". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.272551 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6958c11f-6d19-4cd0-be14-e796f1df37ee-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6958c11f-6d19-4cd0-be14-e796f1df37ee" (UID: "6958c11f-6d19-4cd0-be14-e796f1df37ee"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.272619 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c3488187-a863-4211-be1f-edd74f9a3771-serving-cert\") pod \"controller-manager-589f98ccbb-99gvd\" (UID: \"c3488187-a863-4211-be1f-edd74f9a3771\") " pod="openshift-controller-manager/controller-manager-589f98ccbb-99gvd" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.285773 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2hr5z\" (UniqueName: \"kubernetes.io/projected/c3488187-a863-4211-be1f-edd74f9a3771-kube-api-access-2hr5z\") pod \"controller-manager-589f98ccbb-99gvd\" (UID: \"c3488187-a863-4211-be1f-edd74f9a3771\") " pod="openshift-controller-manager/controller-manager-589f98ccbb-99gvd" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.366898 4753 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6958c11f-6d19-4cd0-be14-e796f1df37ee-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.366964 4753 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/10e48cb3-f357-49bd-8eee-a576c8814487-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.366982 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jn78n\" (UniqueName: \"kubernetes.io/projected/6958c11f-6d19-4cd0-be14-e796f1df37ee-kube-api-access-jn78n\") on node \"crc\" DevicePath \"\"" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.367000 4753 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/10e48cb3-f357-49bd-8eee-a576c8814487-client-ca\") on node \"crc\" DevicePath \"\"" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.367012 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8b2qn\" (UniqueName: \"kubernetes.io/projected/10e48cb3-f357-49bd-8eee-a576c8814487-kube-api-access-8b2qn\") on node \"crc\" DevicePath \"\"" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.367023 4753 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6958c11f-6d19-4cd0-be14-e796f1df37ee-client-ca\") on node \"crc\" DevicePath \"\"" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.367035 4753 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6958c11f-6d19-4cd0-be14-e796f1df37ee-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.475132 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-589f98ccbb-99gvd" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.489253 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-844bbdf588-qdt2h" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.489727 4753 generic.go:334] "Generic (PLEG): container finished" podID="10e48cb3-f357-49bd-8eee-a576c8814487" containerID="252f4e2e3424b357db5c66af56852ed8dd6c17586bbf3fb35216f8cf1954cab3" exitCode=0 Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.490099 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-844bbdf588-qdt2h" event={"ID":"10e48cb3-f357-49bd-8eee-a576c8814487","Type":"ContainerDied","Data":"252f4e2e3424b357db5c66af56852ed8dd6c17586bbf3fb35216f8cf1954cab3"} Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.490158 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-844bbdf588-qdt2h" event={"ID":"10e48cb3-f357-49bd-8eee-a576c8814487","Type":"ContainerDied","Data":"a001c08e5aa2986118d6d4711503ebd0ab8573a6c88c3cebf525d1e8a6090dfb"} Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.490188 4753 scope.go:117] "RemoveContainer" containerID="252f4e2e3424b357db5c66af56852ed8dd6c17586bbf3fb35216f8cf1954cab3" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.492693 4753 generic.go:334] "Generic (PLEG): container finished" podID="6958c11f-6d19-4cd0-be14-e796f1df37ee" containerID="4c75a76ce4e80a4a8fbd77a8e8cd5947963eca8a13120cf1b19a46306c9d5376" exitCode=0 Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.492778 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-c8bcc84b6-vdbzf" event={"ID":"6958c11f-6d19-4cd0-be14-e796f1df37ee","Type":"ContainerDied","Data":"4c75a76ce4e80a4a8fbd77a8e8cd5947963eca8a13120cf1b19a46306c9d5376"} Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.492818 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-c8bcc84b6-vdbzf" event={"ID":"6958c11f-6d19-4cd0-be14-e796f1df37ee","Type":"ContainerDied","Data":"6f13ca5bcef004607a74cd671c47b73cabc51cc8e1bdfc26e90877033d2eb4fe"} Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.492899 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-c8bcc84b6-vdbzf" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.542827 4753 scope.go:117] "RemoveContainer" containerID="252f4e2e3424b357db5c66af56852ed8dd6c17586bbf3fb35216f8cf1954cab3" Jan 29 12:12:33 crc kubenswrapper[4753]: E0129 12:12:33.550569 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"252f4e2e3424b357db5c66af56852ed8dd6c17586bbf3fb35216f8cf1954cab3\": container with ID starting with 252f4e2e3424b357db5c66af56852ed8dd6c17586bbf3fb35216f8cf1954cab3 not found: ID does not exist" containerID="252f4e2e3424b357db5c66af56852ed8dd6c17586bbf3fb35216f8cf1954cab3" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.551392 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"252f4e2e3424b357db5c66af56852ed8dd6c17586bbf3fb35216f8cf1954cab3"} err="failed to get container status \"252f4e2e3424b357db5c66af56852ed8dd6c17586bbf3fb35216f8cf1954cab3\": rpc error: code = NotFound desc = could not find container \"252f4e2e3424b357db5c66af56852ed8dd6c17586bbf3fb35216f8cf1954cab3\": container with ID starting with 252f4e2e3424b357db5c66af56852ed8dd6c17586bbf3fb35216f8cf1954cab3 not found: ID does not exist" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.551564 4753 scope.go:117] "RemoveContainer" containerID="4c75a76ce4e80a4a8fbd77a8e8cd5947963eca8a13120cf1b19a46306c9d5376" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.555884 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-c8bcc84b6-vdbzf"] Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.563386 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-c8bcc84b6-vdbzf"] Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.573103 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-844bbdf588-qdt2h"] Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.577118 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-844bbdf588-qdt2h"] Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.588475 4753 scope.go:117] "RemoveContainer" containerID="4c75a76ce4e80a4a8fbd77a8e8cd5947963eca8a13120cf1b19a46306c9d5376" Jan 29 12:12:33 crc kubenswrapper[4753]: E0129 12:12:33.590887 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4c75a76ce4e80a4a8fbd77a8e8cd5947963eca8a13120cf1b19a46306c9d5376\": container with ID starting with 4c75a76ce4e80a4a8fbd77a8e8cd5947963eca8a13120cf1b19a46306c9d5376 not found: ID does not exist" containerID="4c75a76ce4e80a4a8fbd77a8e8cd5947963eca8a13120cf1b19a46306c9d5376" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.590948 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4c75a76ce4e80a4a8fbd77a8e8cd5947963eca8a13120cf1b19a46306c9d5376"} err="failed to get container status \"4c75a76ce4e80a4a8fbd77a8e8cd5947963eca8a13120cf1b19a46306c9d5376\": rpc error: code = NotFound desc = could not find container \"4c75a76ce4e80a4a8fbd77a8e8cd5947963eca8a13120cf1b19a46306c9d5376\": container with ID starting with 4c75a76ce4e80a4a8fbd77a8e8cd5947963eca8a13120cf1b19a46306c9d5376 not found: ID does not exist" Jan 29 
12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.898466 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="10e48cb3-f357-49bd-8eee-a576c8814487" path="/var/lib/kubelet/pods/10e48cb3-f357-49bd-8eee-a576c8814487/volumes" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.899046 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6958c11f-6d19-4cd0-be14-e796f1df37ee" path="/var/lib/kubelet/pods/6958c11f-6d19-4cd0-be14-e796f1df37ee/volumes" Jan 29 12:12:33 crc kubenswrapper[4753]: I0129 12:12:33.912098 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-589f98ccbb-99gvd"] Jan 29 12:12:34 crc kubenswrapper[4753]: I0129 12:12:34.103388 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-64cb998744-pctj8"] Jan 29 12:12:34 crc kubenswrapper[4753]: I0129 12:12:34.104176 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-64cb998744-pctj8" Jan 29 12:12:34 crc kubenswrapper[4753]: I0129 12:12:34.107310 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 29 12:12:34 crc kubenswrapper[4753]: I0129 12:12:34.107920 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 29 12:12:34 crc kubenswrapper[4753]: I0129 12:12:34.108119 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 29 12:12:34 crc kubenswrapper[4753]: I0129 12:12:34.108444 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 29 12:12:34 crc kubenswrapper[4753]: I0129 12:12:34.108628 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 29 12:12:34 crc kubenswrapper[4753]: I0129 12:12:34.108967 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 29 12:12:34 crc kubenswrapper[4753]: I0129 12:12:34.135491 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-64cb998744-pctj8"] Jan 29 12:12:34 crc kubenswrapper[4753]: I0129 12:12:34.182394 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pmd7q\" (UniqueName: \"kubernetes.io/projected/56c216e3-1be1-4d50-863c-6c7a8020888c-kube-api-access-pmd7q\") pod \"route-controller-manager-64cb998744-pctj8\" (UID: \"56c216e3-1be1-4d50-863c-6c7a8020888c\") " pod="openshift-route-controller-manager/route-controller-manager-64cb998744-pctj8" Jan 29 12:12:34 crc kubenswrapper[4753]: I0129 12:12:34.182948 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/56c216e3-1be1-4d50-863c-6c7a8020888c-client-ca\") pod \"route-controller-manager-64cb998744-pctj8\" (UID: \"56c216e3-1be1-4d50-863c-6c7a8020888c\") " pod="openshift-route-controller-manager/route-controller-manager-64cb998744-pctj8" Jan 29 12:12:34 crc kubenswrapper[4753]: I0129 12:12:34.182997 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" 
(UniqueName: \"kubernetes.io/secret/56c216e3-1be1-4d50-863c-6c7a8020888c-serving-cert\") pod \"route-controller-manager-64cb998744-pctj8\" (UID: \"56c216e3-1be1-4d50-863c-6c7a8020888c\") " pod="openshift-route-controller-manager/route-controller-manager-64cb998744-pctj8" Jan 29 12:12:34 crc kubenswrapper[4753]: I0129 12:12:34.183069 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/56c216e3-1be1-4d50-863c-6c7a8020888c-config\") pod \"route-controller-manager-64cb998744-pctj8\" (UID: \"56c216e3-1be1-4d50-863c-6c7a8020888c\") " pod="openshift-route-controller-manager/route-controller-manager-64cb998744-pctj8" Jan 29 12:12:34 crc kubenswrapper[4753]: I0129 12:12:34.284535 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pmd7q\" (UniqueName: \"kubernetes.io/projected/56c216e3-1be1-4d50-863c-6c7a8020888c-kube-api-access-pmd7q\") pod \"route-controller-manager-64cb998744-pctj8\" (UID: \"56c216e3-1be1-4d50-863c-6c7a8020888c\") " pod="openshift-route-controller-manager/route-controller-manager-64cb998744-pctj8" Jan 29 12:12:34 crc kubenswrapper[4753]: I0129 12:12:34.284646 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/56c216e3-1be1-4d50-863c-6c7a8020888c-client-ca\") pod \"route-controller-manager-64cb998744-pctj8\" (UID: \"56c216e3-1be1-4d50-863c-6c7a8020888c\") " pod="openshift-route-controller-manager/route-controller-manager-64cb998744-pctj8" Jan 29 12:12:34 crc kubenswrapper[4753]: I0129 12:12:34.284677 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/56c216e3-1be1-4d50-863c-6c7a8020888c-serving-cert\") pod \"route-controller-manager-64cb998744-pctj8\" (UID: \"56c216e3-1be1-4d50-863c-6c7a8020888c\") " pod="openshift-route-controller-manager/route-controller-manager-64cb998744-pctj8" Jan 29 12:12:34 crc kubenswrapper[4753]: I0129 12:12:34.284735 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/56c216e3-1be1-4d50-863c-6c7a8020888c-config\") pod \"route-controller-manager-64cb998744-pctj8\" (UID: \"56c216e3-1be1-4d50-863c-6c7a8020888c\") " pod="openshift-route-controller-manager/route-controller-manager-64cb998744-pctj8" Jan 29 12:12:34 crc kubenswrapper[4753]: I0129 12:12:34.286062 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/56c216e3-1be1-4d50-863c-6c7a8020888c-config\") pod \"route-controller-manager-64cb998744-pctj8\" (UID: \"56c216e3-1be1-4d50-863c-6c7a8020888c\") " pod="openshift-route-controller-manager/route-controller-manager-64cb998744-pctj8" Jan 29 12:12:34 crc kubenswrapper[4753]: I0129 12:12:34.286560 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/56c216e3-1be1-4d50-863c-6c7a8020888c-client-ca\") pod \"route-controller-manager-64cb998744-pctj8\" (UID: \"56c216e3-1be1-4d50-863c-6c7a8020888c\") " pod="openshift-route-controller-manager/route-controller-manager-64cb998744-pctj8" Jan 29 12:12:34 crc kubenswrapper[4753]: I0129 12:12:34.292217 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/56c216e3-1be1-4d50-863c-6c7a8020888c-serving-cert\") pod 
\"route-controller-manager-64cb998744-pctj8\" (UID: \"56c216e3-1be1-4d50-863c-6c7a8020888c\") " pod="openshift-route-controller-manager/route-controller-manager-64cb998744-pctj8" Jan 29 12:12:34 crc kubenswrapper[4753]: I0129 12:12:34.302454 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pmd7q\" (UniqueName: \"kubernetes.io/projected/56c216e3-1be1-4d50-863c-6c7a8020888c-kube-api-access-pmd7q\") pod \"route-controller-manager-64cb998744-pctj8\" (UID: \"56c216e3-1be1-4d50-863c-6c7a8020888c\") " pod="openshift-route-controller-manager/route-controller-manager-64cb998744-pctj8" Jan 29 12:12:34 crc kubenswrapper[4753]: I0129 12:12:34.431903 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-64cb998744-pctj8" Jan 29 12:12:34 crc kubenswrapper[4753]: I0129 12:12:34.518197 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-589f98ccbb-99gvd" event={"ID":"c3488187-a863-4211-be1f-edd74f9a3771","Type":"ContainerStarted","Data":"fe50d83774a87da98164edd1ee06153ab4da9bcab55cda78e65708a4b75c48ce"} Jan 29 12:12:34 crc kubenswrapper[4753]: I0129 12:12:34.518276 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-589f98ccbb-99gvd" event={"ID":"c3488187-a863-4211-be1f-edd74f9a3771","Type":"ContainerStarted","Data":"4cd4bb28be1c44c610b22a16f099f4503f6d404e06ed316fc960725bb8a06445"} Jan 29 12:12:34 crc kubenswrapper[4753]: I0129 12:12:34.520239 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-589f98ccbb-99gvd" Jan 29 12:12:34 crc kubenswrapper[4753]: I0129 12:12:34.531636 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-589f98ccbb-99gvd" Jan 29 12:12:34 crc kubenswrapper[4753]: I0129 12:12:34.554718 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-589f98ccbb-99gvd" podStartSLOduration=2.554694199 podStartE2EDuration="2.554694199s" podCreationTimestamp="2026-01-29 12:12:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:12:34.54609555 +0000 UTC m=+368.798177005" watchObservedRunningTime="2026-01-29 12:12:34.554694199 +0000 UTC m=+368.806775654" Jan 29 12:12:34 crc kubenswrapper[4753]: I0129 12:12:34.771021 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-64cb998744-pctj8"] Jan 29 12:12:34 crc kubenswrapper[4753]: W0129 12:12:34.782161 4753 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod56c216e3_1be1_4d50_863c_6c7a8020888c.slice/crio-34e9f5456324a2509632f2ca54db61d767aa7e8d43ac590d456a74184b7e4f18 WatchSource:0}: Error finding container 34e9f5456324a2509632f2ca54db61d767aa7e8d43ac590d456a74184b7e4f18: Status 404 returned error can't find the container with id 34e9f5456324a2509632f2ca54db61d767aa7e8d43ac590d456a74184b7e4f18 Jan 29 12:12:35 crc kubenswrapper[4753]: I0129 12:12:35.530075 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-64cb998744-pctj8" 
event={"ID":"56c216e3-1be1-4d50-863c-6c7a8020888c","Type":"ContainerStarted","Data":"7db8af342b315c38ecb25c3e7dfab2aab9c2d92ea1e129f840fb29cb047b90d5"} Jan 29 12:12:35 crc kubenswrapper[4753]: I0129 12:12:35.530561 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-64cb998744-pctj8" event={"ID":"56c216e3-1be1-4d50-863c-6c7a8020888c","Type":"ContainerStarted","Data":"34e9f5456324a2509632f2ca54db61d767aa7e8d43ac590d456a74184b7e4f18"} Jan 29 12:12:35 crc kubenswrapper[4753]: I0129 12:12:35.530928 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-64cb998744-pctj8" Jan 29 12:12:35 crc kubenswrapper[4753]: I0129 12:12:35.552980 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-64cb998744-pctj8" podStartSLOduration=3.552951777 podStartE2EDuration="3.552951777s" podCreationTimestamp="2026-01-29 12:12:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:12:35.550782932 +0000 UTC m=+369.802864397" watchObservedRunningTime="2026-01-29 12:12:35.552951777 +0000 UTC m=+369.805033232" Jan 29 12:12:35 crc kubenswrapper[4753]: I0129 12:12:35.562761 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-64cb998744-pctj8" Jan 29 12:12:38 crc kubenswrapper[4753]: I0129 12:12:38.540884 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn" podUID="9728fd7e-6203-4082-9297-2d3fd9e17b74" containerName="oauth-openshift" containerID="cri-o://f00710772f44f4304d4013d4a1a5bcd7a1c5f1eef491b88d83a683fe1f1522f1" gracePeriod=15 Jan 29 12:12:39 crc kubenswrapper[4753]: I0129 12:12:39.557103 4753 generic.go:334] "Generic (PLEG): container finished" podID="9728fd7e-6203-4082-9297-2d3fd9e17b74" containerID="f00710772f44f4304d4013d4a1a5bcd7a1c5f1eef491b88d83a683fe1f1522f1" exitCode=0 Jan 29 12:12:39 crc kubenswrapper[4753]: I0129 12:12:39.557248 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn" event={"ID":"9728fd7e-6203-4082-9297-2d3fd9e17b74","Type":"ContainerDied","Data":"f00710772f44f4304d4013d4a1a5bcd7a1c5f1eef491b88d83a683fe1f1522f1"} Jan 29 12:12:39 crc kubenswrapper[4753]: I0129 12:12:39.600492 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn" Jan 29 12:12:39 crc kubenswrapper[4753]: I0129 12:12:39.679559 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-user-template-login\") pod \"9728fd7e-6203-4082-9297-2d3fd9e17b74\" (UID: \"9728fd7e-6203-4082-9297-2d3fd9e17b74\") " Jan 29 12:12:39 crc kubenswrapper[4753]: I0129 12:12:39.679623 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-system-trusted-ca-bundle\") pod \"9728fd7e-6203-4082-9297-2d3fd9e17b74\" (UID: \"9728fd7e-6203-4082-9297-2d3fd9e17b74\") " Jan 29 12:12:39 crc kubenswrapper[4753]: I0129 12:12:39.679679 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-user-idp-0-file-data\") pod \"9728fd7e-6203-4082-9297-2d3fd9e17b74\" (UID: \"9728fd7e-6203-4082-9297-2d3fd9e17b74\") " Jan 29 12:12:39 crc kubenswrapper[4753]: I0129 12:12:39.679703 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-system-ocp-branding-template\") pod \"9728fd7e-6203-4082-9297-2d3fd9e17b74\" (UID: \"9728fd7e-6203-4082-9297-2d3fd9e17b74\") " Jan 29 12:12:39 crc kubenswrapper[4753]: I0129 12:12:39.679733 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-system-cliconfig\") pod \"9728fd7e-6203-4082-9297-2d3fd9e17b74\" (UID: \"9728fd7e-6203-4082-9297-2d3fd9e17b74\") " Jan 29 12:12:39 crc kubenswrapper[4753]: I0129 12:12:39.679760 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-system-router-certs\") pod \"9728fd7e-6203-4082-9297-2d3fd9e17b74\" (UID: \"9728fd7e-6203-4082-9297-2d3fd9e17b74\") " Jan 29 12:12:39 crc kubenswrapper[4753]: I0129 12:12:39.679775 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-system-service-ca\") pod \"9728fd7e-6203-4082-9297-2d3fd9e17b74\" (UID: \"9728fd7e-6203-4082-9297-2d3fd9e17b74\") " Jan 29 12:12:39 crc kubenswrapper[4753]: I0129 12:12:39.679799 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-user-template-error\") pod \"9728fd7e-6203-4082-9297-2d3fd9e17b74\" (UID: \"9728fd7e-6203-4082-9297-2d3fd9e17b74\") " Jan 29 12:12:39 crc kubenswrapper[4753]: I0129 12:12:39.679830 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/9728fd7e-6203-4082-9297-2d3fd9e17b74-audit-dir\") pod \"9728fd7e-6203-4082-9297-2d3fd9e17b74\" (UID: \"9728fd7e-6203-4082-9297-2d3fd9e17b74\") " Jan 29 
12:12:39 crc kubenswrapper[4753]: I0129 12:12:39.679859 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-system-session\") pod \"9728fd7e-6203-4082-9297-2d3fd9e17b74\" (UID: \"9728fd7e-6203-4082-9297-2d3fd9e17b74\") " Jan 29 12:12:39 crc kubenswrapper[4753]: I0129 12:12:39.679880 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/9728fd7e-6203-4082-9297-2d3fd9e17b74-audit-policies\") pod \"9728fd7e-6203-4082-9297-2d3fd9e17b74\" (UID: \"9728fd7e-6203-4082-9297-2d3fd9e17b74\") " Jan 29 12:12:39 crc kubenswrapper[4753]: I0129 12:12:39.679920 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9fdsb\" (UniqueName: \"kubernetes.io/projected/9728fd7e-6203-4082-9297-2d3fd9e17b74-kube-api-access-9fdsb\") pod \"9728fd7e-6203-4082-9297-2d3fd9e17b74\" (UID: \"9728fd7e-6203-4082-9297-2d3fd9e17b74\") " Jan 29 12:12:39 crc kubenswrapper[4753]: I0129 12:12:39.679960 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-user-template-provider-selection\") pod \"9728fd7e-6203-4082-9297-2d3fd9e17b74\" (UID: \"9728fd7e-6203-4082-9297-2d3fd9e17b74\") " Jan 29 12:12:39 crc kubenswrapper[4753]: I0129 12:12:39.679985 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-system-serving-cert\") pod \"9728fd7e-6203-4082-9297-2d3fd9e17b74\" (UID: \"9728fd7e-6203-4082-9297-2d3fd9e17b74\") " Jan 29 12:12:39 crc kubenswrapper[4753]: I0129 12:12:39.680878 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9728fd7e-6203-4082-9297-2d3fd9e17b74-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "9728fd7e-6203-4082-9297-2d3fd9e17b74" (UID: "9728fd7e-6203-4082-9297-2d3fd9e17b74"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 12:12:39 crc kubenswrapper[4753]: I0129 12:12:39.681765 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9728fd7e-6203-4082-9297-2d3fd9e17b74-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "9728fd7e-6203-4082-9297-2d3fd9e17b74" (UID: "9728fd7e-6203-4082-9297-2d3fd9e17b74"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:12:39 crc kubenswrapper[4753]: I0129 12:12:39.682268 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "9728fd7e-6203-4082-9297-2d3fd9e17b74" (UID: "9728fd7e-6203-4082-9297-2d3fd9e17b74"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:12:39 crc kubenswrapper[4753]: I0129 12:12:39.682382 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "9728fd7e-6203-4082-9297-2d3fd9e17b74" (UID: "9728fd7e-6203-4082-9297-2d3fd9e17b74"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:12:39 crc kubenswrapper[4753]: I0129 12:12:39.684592 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "9728fd7e-6203-4082-9297-2d3fd9e17b74" (UID: "9728fd7e-6203-4082-9297-2d3fd9e17b74"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:12:39 crc kubenswrapper[4753]: I0129 12:12:39.689287 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "9728fd7e-6203-4082-9297-2d3fd9e17b74" (UID: "9728fd7e-6203-4082-9297-2d3fd9e17b74"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:12:39 crc kubenswrapper[4753]: I0129 12:12:39.702347 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "9728fd7e-6203-4082-9297-2d3fd9e17b74" (UID: "9728fd7e-6203-4082-9297-2d3fd9e17b74"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:12:39 crc kubenswrapper[4753]: I0129 12:12:39.703111 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "9728fd7e-6203-4082-9297-2d3fd9e17b74" (UID: "9728fd7e-6203-4082-9297-2d3fd9e17b74"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:12:39 crc kubenswrapper[4753]: I0129 12:12:39.703215 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9728fd7e-6203-4082-9297-2d3fd9e17b74-kube-api-access-9fdsb" (OuterVolumeSpecName: "kube-api-access-9fdsb") pod "9728fd7e-6203-4082-9297-2d3fd9e17b74" (UID: "9728fd7e-6203-4082-9297-2d3fd9e17b74"). InnerVolumeSpecName "kube-api-access-9fdsb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:12:39 crc kubenswrapper[4753]: I0129 12:12:39.703868 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "9728fd7e-6203-4082-9297-2d3fd9e17b74" (UID: "9728fd7e-6203-4082-9297-2d3fd9e17b74"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:12:39 crc kubenswrapper[4753]: I0129 12:12:39.704065 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "9728fd7e-6203-4082-9297-2d3fd9e17b74" (UID: "9728fd7e-6203-4082-9297-2d3fd9e17b74"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:12:39 crc kubenswrapper[4753]: I0129 12:12:39.704331 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "9728fd7e-6203-4082-9297-2d3fd9e17b74" (UID: "9728fd7e-6203-4082-9297-2d3fd9e17b74"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:12:39 crc kubenswrapper[4753]: I0129 12:12:39.705054 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "9728fd7e-6203-4082-9297-2d3fd9e17b74" (UID: "9728fd7e-6203-4082-9297-2d3fd9e17b74"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:12:39 crc kubenswrapper[4753]: I0129 12:12:39.705544 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "9728fd7e-6203-4082-9297-2d3fd9e17b74" (UID: "9728fd7e-6203-4082-9297-2d3fd9e17b74"). InnerVolumeSpecName "v4-0-config-system-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:12:39 crc kubenswrapper[4753]: I0129 12:12:39.782049 4753 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/9728fd7e-6203-4082-9297-2d3fd9e17b74-audit-dir\") on node \"crc\" DevicePath \"\"" Jan 29 12:12:39 crc kubenswrapper[4753]: I0129 12:12:39.782127 4753 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Jan 29 12:12:39 crc kubenswrapper[4753]: I0129 12:12:39.782171 4753 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/9728fd7e-6203-4082-9297-2d3fd9e17b74-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 29 12:12:39 crc kubenswrapper[4753]: I0129 12:12:39.782184 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9fdsb\" (UniqueName: \"kubernetes.io/projected/9728fd7e-6203-4082-9297-2d3fd9e17b74-kube-api-access-9fdsb\") on node \"crc\" DevicePath \"\"" Jan 29 12:12:39 crc kubenswrapper[4753]: I0129 12:12:39.782199 4753 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Jan 29 12:12:39 crc kubenswrapper[4753]: I0129 12:12:39.782476 4753 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 12:12:39 crc kubenswrapper[4753]: I0129 12:12:39.782497 4753 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Jan 29 12:12:39 crc kubenswrapper[4753]: I0129 12:12:39.782538 4753 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:12:39 crc kubenswrapper[4753]: I0129 12:12:39.782554 4753 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Jan 29 12:12:39 crc kubenswrapper[4753]: I0129 12:12:39.782567 4753 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Jan 29 12:12:39 crc kubenswrapper[4753]: I0129 12:12:39.782578 4753 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Jan 29 12:12:39 crc kubenswrapper[4753]: I0129 12:12:39.782593 4753 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: 
\"kubernetes.io/configmap/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Jan 29 12:12:39 crc kubenswrapper[4753]: I0129 12:12:39.782605 4753 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Jan 29 12:12:39 crc kubenswrapper[4753]: I0129 12:12:39.782619 4753 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/9728fd7e-6203-4082-9297-2d3fd9e17b74-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Jan 29 12:12:40 crc kubenswrapper[4753]: I0129 12:12:40.112445 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-784cdd6c44-r7qtk"] Jan 29 12:12:40 crc kubenswrapper[4753]: E0129 12:12:40.112737 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9728fd7e-6203-4082-9297-2d3fd9e17b74" containerName="oauth-openshift" Jan 29 12:12:40 crc kubenswrapper[4753]: I0129 12:12:40.112752 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="9728fd7e-6203-4082-9297-2d3fd9e17b74" containerName="oauth-openshift" Jan 29 12:12:40 crc kubenswrapper[4753]: I0129 12:12:40.112856 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="9728fd7e-6203-4082-9297-2d3fd9e17b74" containerName="oauth-openshift" Jan 29 12:12:40 crc kubenswrapper[4753]: I0129 12:12:40.113269 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-784cdd6c44-r7qtk" Jan 29 12:12:40 crc kubenswrapper[4753]: I0129 12:12:40.126821 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-784cdd6c44-r7qtk"] Jan 29 12:12:40 crc kubenswrapper[4753]: I0129 12:12:40.189319 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/56946fea-4fb3-4365-adfe-42e4a646c881-v4-0-config-user-template-error\") pod \"oauth-openshift-784cdd6c44-r7qtk\" (UID: \"56946fea-4fb3-4365-adfe-42e4a646c881\") " pod="openshift-authentication/oauth-openshift-784cdd6c44-r7qtk" Jan 29 12:12:40 crc kubenswrapper[4753]: I0129 12:12:40.189386 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fnksk\" (UniqueName: \"kubernetes.io/projected/56946fea-4fb3-4365-adfe-42e4a646c881-kube-api-access-fnksk\") pod \"oauth-openshift-784cdd6c44-r7qtk\" (UID: \"56946fea-4fb3-4365-adfe-42e4a646c881\") " pod="openshift-authentication/oauth-openshift-784cdd6c44-r7qtk" Jan 29 12:12:40 crc kubenswrapper[4753]: I0129 12:12:40.189443 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/56946fea-4fb3-4365-adfe-42e4a646c881-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-784cdd6c44-r7qtk\" (UID: \"56946fea-4fb3-4365-adfe-42e4a646c881\") " pod="openshift-authentication/oauth-openshift-784cdd6c44-r7qtk" Jan 29 12:12:40 crc kubenswrapper[4753]: I0129 12:12:40.189510 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: 
\"kubernetes.io/secret/56946fea-4fb3-4365-adfe-42e4a646c881-v4-0-config-user-template-login\") pod \"oauth-openshift-784cdd6c44-r7qtk\" (UID: \"56946fea-4fb3-4365-adfe-42e4a646c881\") " pod="openshift-authentication/oauth-openshift-784cdd6c44-r7qtk" Jan 29 12:12:40 crc kubenswrapper[4753]: I0129 12:12:40.189531 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/56946fea-4fb3-4365-adfe-42e4a646c881-v4-0-config-system-session\") pod \"oauth-openshift-784cdd6c44-r7qtk\" (UID: \"56946fea-4fb3-4365-adfe-42e4a646c881\") " pod="openshift-authentication/oauth-openshift-784cdd6c44-r7qtk" Jan 29 12:12:40 crc kubenswrapper[4753]: I0129 12:12:40.189568 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/56946fea-4fb3-4365-adfe-42e4a646c881-v4-0-config-system-cliconfig\") pod \"oauth-openshift-784cdd6c44-r7qtk\" (UID: \"56946fea-4fb3-4365-adfe-42e4a646c881\") " pod="openshift-authentication/oauth-openshift-784cdd6c44-r7qtk" Jan 29 12:12:40 crc kubenswrapper[4753]: I0129 12:12:40.189585 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/56946fea-4fb3-4365-adfe-42e4a646c881-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-784cdd6c44-r7qtk\" (UID: \"56946fea-4fb3-4365-adfe-42e4a646c881\") " pod="openshift-authentication/oauth-openshift-784cdd6c44-r7qtk" Jan 29 12:12:40 crc kubenswrapper[4753]: I0129 12:12:40.189612 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/56946fea-4fb3-4365-adfe-42e4a646c881-audit-dir\") pod \"oauth-openshift-784cdd6c44-r7qtk\" (UID: \"56946fea-4fb3-4365-adfe-42e4a646c881\") " pod="openshift-authentication/oauth-openshift-784cdd6c44-r7qtk" Jan 29 12:12:40 crc kubenswrapper[4753]: I0129 12:12:40.189640 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/56946fea-4fb3-4365-adfe-42e4a646c881-audit-policies\") pod \"oauth-openshift-784cdd6c44-r7qtk\" (UID: \"56946fea-4fb3-4365-adfe-42e4a646c881\") " pod="openshift-authentication/oauth-openshift-784cdd6c44-r7qtk" Jan 29 12:12:40 crc kubenswrapper[4753]: I0129 12:12:40.189804 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/56946fea-4fb3-4365-adfe-42e4a646c881-v4-0-config-system-serving-cert\") pod \"oauth-openshift-784cdd6c44-r7qtk\" (UID: \"56946fea-4fb3-4365-adfe-42e4a646c881\") " pod="openshift-authentication/oauth-openshift-784cdd6c44-r7qtk" Jan 29 12:12:40 crc kubenswrapper[4753]: I0129 12:12:40.189857 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/56946fea-4fb3-4365-adfe-42e4a646c881-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-784cdd6c44-r7qtk\" (UID: \"56946fea-4fb3-4365-adfe-42e4a646c881\") " pod="openshift-authentication/oauth-openshift-784cdd6c44-r7qtk" Jan 29 12:12:40 crc kubenswrapper[4753]: I0129 12:12:40.189915 4753 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/56946fea-4fb3-4365-adfe-42e4a646c881-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-784cdd6c44-r7qtk\" (UID: \"56946fea-4fb3-4365-adfe-42e4a646c881\") " pod="openshift-authentication/oauth-openshift-784cdd6c44-r7qtk" Jan 29 12:12:40 crc kubenswrapper[4753]: I0129 12:12:40.190189 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/56946fea-4fb3-4365-adfe-42e4a646c881-v4-0-config-system-service-ca\") pod \"oauth-openshift-784cdd6c44-r7qtk\" (UID: \"56946fea-4fb3-4365-adfe-42e4a646c881\") " pod="openshift-authentication/oauth-openshift-784cdd6c44-r7qtk" Jan 29 12:12:40 crc kubenswrapper[4753]: I0129 12:12:40.190388 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/56946fea-4fb3-4365-adfe-42e4a646c881-v4-0-config-system-router-certs\") pod \"oauth-openshift-784cdd6c44-r7qtk\" (UID: \"56946fea-4fb3-4365-adfe-42e4a646c881\") " pod="openshift-authentication/oauth-openshift-784cdd6c44-r7qtk" Jan 29 12:12:40 crc kubenswrapper[4753]: I0129 12:12:40.292083 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/56946fea-4fb3-4365-adfe-42e4a646c881-v4-0-config-system-router-certs\") pod \"oauth-openshift-784cdd6c44-r7qtk\" (UID: \"56946fea-4fb3-4365-adfe-42e4a646c881\") " pod="openshift-authentication/oauth-openshift-784cdd6c44-r7qtk" Jan 29 12:12:40 crc kubenswrapper[4753]: I0129 12:12:40.292717 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/56946fea-4fb3-4365-adfe-42e4a646c881-v4-0-config-user-template-error\") pod \"oauth-openshift-784cdd6c44-r7qtk\" (UID: \"56946fea-4fb3-4365-adfe-42e4a646c881\") " pod="openshift-authentication/oauth-openshift-784cdd6c44-r7qtk" Jan 29 12:12:40 crc kubenswrapper[4753]: I0129 12:12:40.292749 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fnksk\" (UniqueName: \"kubernetes.io/projected/56946fea-4fb3-4365-adfe-42e4a646c881-kube-api-access-fnksk\") pod \"oauth-openshift-784cdd6c44-r7qtk\" (UID: \"56946fea-4fb3-4365-adfe-42e4a646c881\") " pod="openshift-authentication/oauth-openshift-784cdd6c44-r7qtk" Jan 29 12:12:40 crc kubenswrapper[4753]: I0129 12:12:40.292782 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/56946fea-4fb3-4365-adfe-42e4a646c881-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-784cdd6c44-r7qtk\" (UID: \"56946fea-4fb3-4365-adfe-42e4a646c881\") " pod="openshift-authentication/oauth-openshift-784cdd6c44-r7qtk" Jan 29 12:12:40 crc kubenswrapper[4753]: I0129 12:12:40.292880 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/56946fea-4fb3-4365-adfe-42e4a646c881-v4-0-config-user-template-login\") pod \"oauth-openshift-784cdd6c44-r7qtk\" (UID: \"56946fea-4fb3-4365-adfe-42e4a646c881\") " pod="openshift-authentication/oauth-openshift-784cdd6c44-r7qtk" Jan 29 12:12:40 crc 
kubenswrapper[4753]: I0129 12:12:40.292904 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/56946fea-4fb3-4365-adfe-42e4a646c881-v4-0-config-system-session\") pod \"oauth-openshift-784cdd6c44-r7qtk\" (UID: \"56946fea-4fb3-4365-adfe-42e4a646c881\") " pod="openshift-authentication/oauth-openshift-784cdd6c44-r7qtk" Jan 29 12:12:40 crc kubenswrapper[4753]: I0129 12:12:40.292963 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/56946fea-4fb3-4365-adfe-42e4a646c881-v4-0-config-system-cliconfig\") pod \"oauth-openshift-784cdd6c44-r7qtk\" (UID: \"56946fea-4fb3-4365-adfe-42e4a646c881\") " pod="openshift-authentication/oauth-openshift-784cdd6c44-r7qtk" Jan 29 12:12:40 crc kubenswrapper[4753]: I0129 12:12:40.293001 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/56946fea-4fb3-4365-adfe-42e4a646c881-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-784cdd6c44-r7qtk\" (UID: \"56946fea-4fb3-4365-adfe-42e4a646c881\") " pod="openshift-authentication/oauth-openshift-784cdd6c44-r7qtk" Jan 29 12:12:40 crc kubenswrapper[4753]: I0129 12:12:40.293046 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/56946fea-4fb3-4365-adfe-42e4a646c881-audit-dir\") pod \"oauth-openshift-784cdd6c44-r7qtk\" (UID: \"56946fea-4fb3-4365-adfe-42e4a646c881\") " pod="openshift-authentication/oauth-openshift-784cdd6c44-r7qtk" Jan 29 12:12:40 crc kubenswrapper[4753]: I0129 12:12:40.293103 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/56946fea-4fb3-4365-adfe-42e4a646c881-audit-policies\") pod \"oauth-openshift-784cdd6c44-r7qtk\" (UID: \"56946fea-4fb3-4365-adfe-42e4a646c881\") " pod="openshift-authentication/oauth-openshift-784cdd6c44-r7qtk" Jan 29 12:12:40 crc kubenswrapper[4753]: I0129 12:12:40.293164 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/56946fea-4fb3-4365-adfe-42e4a646c881-v4-0-config-system-serving-cert\") pod \"oauth-openshift-784cdd6c44-r7qtk\" (UID: \"56946fea-4fb3-4365-adfe-42e4a646c881\") " pod="openshift-authentication/oauth-openshift-784cdd6c44-r7qtk" Jan 29 12:12:40 crc kubenswrapper[4753]: I0129 12:12:40.293188 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/56946fea-4fb3-4365-adfe-42e4a646c881-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-784cdd6c44-r7qtk\" (UID: \"56946fea-4fb3-4365-adfe-42e4a646c881\") " pod="openshift-authentication/oauth-openshift-784cdd6c44-r7qtk" Jan 29 12:12:40 crc kubenswrapper[4753]: I0129 12:12:40.293214 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/56946fea-4fb3-4365-adfe-42e4a646c881-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-784cdd6c44-r7qtk\" (UID: \"56946fea-4fb3-4365-adfe-42e4a646c881\") " pod="openshift-authentication/oauth-openshift-784cdd6c44-r7qtk" Jan 29 12:12:40 crc kubenswrapper[4753]: I0129 12:12:40.293319 4753 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/56946fea-4fb3-4365-adfe-42e4a646c881-v4-0-config-system-service-ca\") pod \"oauth-openshift-784cdd6c44-r7qtk\" (UID: \"56946fea-4fb3-4365-adfe-42e4a646c881\") " pod="openshift-authentication/oauth-openshift-784cdd6c44-r7qtk" Jan 29 12:12:40 crc kubenswrapper[4753]: I0129 12:12:40.294533 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/56946fea-4fb3-4365-adfe-42e4a646c881-v4-0-config-system-service-ca\") pod \"oauth-openshift-784cdd6c44-r7qtk\" (UID: \"56946fea-4fb3-4365-adfe-42e4a646c881\") " pod="openshift-authentication/oauth-openshift-784cdd6c44-r7qtk" Jan 29 12:12:40 crc kubenswrapper[4753]: I0129 12:12:40.294533 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/56946fea-4fb3-4365-adfe-42e4a646c881-v4-0-config-system-cliconfig\") pod \"oauth-openshift-784cdd6c44-r7qtk\" (UID: \"56946fea-4fb3-4365-adfe-42e4a646c881\") " pod="openshift-authentication/oauth-openshift-784cdd6c44-r7qtk" Jan 29 12:12:40 crc kubenswrapper[4753]: I0129 12:12:40.294706 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/56946fea-4fb3-4365-adfe-42e4a646c881-audit-policies\") pod \"oauth-openshift-784cdd6c44-r7qtk\" (UID: \"56946fea-4fb3-4365-adfe-42e4a646c881\") " pod="openshift-authentication/oauth-openshift-784cdd6c44-r7qtk" Jan 29 12:12:40 crc kubenswrapper[4753]: I0129 12:12:40.294805 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/56946fea-4fb3-4365-adfe-42e4a646c881-audit-dir\") pod \"oauth-openshift-784cdd6c44-r7qtk\" (UID: \"56946fea-4fb3-4365-adfe-42e4a646c881\") " pod="openshift-authentication/oauth-openshift-784cdd6c44-r7qtk" Jan 29 12:12:40 crc kubenswrapper[4753]: I0129 12:12:40.295990 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/56946fea-4fb3-4365-adfe-42e4a646c881-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-784cdd6c44-r7qtk\" (UID: \"56946fea-4fb3-4365-adfe-42e4a646c881\") " pod="openshift-authentication/oauth-openshift-784cdd6c44-r7qtk" Jan 29 12:12:40 crc kubenswrapper[4753]: I0129 12:12:40.297591 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/56946fea-4fb3-4365-adfe-42e4a646c881-v4-0-config-system-router-certs\") pod \"oauth-openshift-784cdd6c44-r7qtk\" (UID: \"56946fea-4fb3-4365-adfe-42e4a646c881\") " pod="openshift-authentication/oauth-openshift-784cdd6c44-r7qtk" Jan 29 12:12:40 crc kubenswrapper[4753]: I0129 12:12:40.297616 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/56946fea-4fb3-4365-adfe-42e4a646c881-v4-0-config-user-template-error\") pod \"oauth-openshift-784cdd6c44-r7qtk\" (UID: \"56946fea-4fb3-4365-adfe-42e4a646c881\") " pod="openshift-authentication/oauth-openshift-784cdd6c44-r7qtk" Jan 29 12:12:40 crc kubenswrapper[4753]: I0129 12:12:40.297929 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: 
\"kubernetes.io/secret/56946fea-4fb3-4365-adfe-42e4a646c881-v4-0-config-user-template-login\") pod \"oauth-openshift-784cdd6c44-r7qtk\" (UID: \"56946fea-4fb3-4365-adfe-42e4a646c881\") " pod="openshift-authentication/oauth-openshift-784cdd6c44-r7qtk" Jan 29 12:12:40 crc kubenswrapper[4753]: I0129 12:12:40.299607 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/56946fea-4fb3-4365-adfe-42e4a646c881-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-784cdd6c44-r7qtk\" (UID: \"56946fea-4fb3-4365-adfe-42e4a646c881\") " pod="openshift-authentication/oauth-openshift-784cdd6c44-r7qtk" Jan 29 12:12:40 crc kubenswrapper[4753]: I0129 12:12:40.300294 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/56946fea-4fb3-4365-adfe-42e4a646c881-v4-0-config-system-serving-cert\") pod \"oauth-openshift-784cdd6c44-r7qtk\" (UID: \"56946fea-4fb3-4365-adfe-42e4a646c881\") " pod="openshift-authentication/oauth-openshift-784cdd6c44-r7qtk" Jan 29 12:12:40 crc kubenswrapper[4753]: I0129 12:12:40.300591 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/56946fea-4fb3-4365-adfe-42e4a646c881-v4-0-config-system-session\") pod \"oauth-openshift-784cdd6c44-r7qtk\" (UID: \"56946fea-4fb3-4365-adfe-42e4a646c881\") " pod="openshift-authentication/oauth-openshift-784cdd6c44-r7qtk" Jan 29 12:12:40 crc kubenswrapper[4753]: I0129 12:12:40.301639 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/56946fea-4fb3-4365-adfe-42e4a646c881-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-784cdd6c44-r7qtk\" (UID: \"56946fea-4fb3-4365-adfe-42e4a646c881\") " pod="openshift-authentication/oauth-openshift-784cdd6c44-r7qtk" Jan 29 12:12:40 crc kubenswrapper[4753]: I0129 12:12:40.303464 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/56946fea-4fb3-4365-adfe-42e4a646c881-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-784cdd6c44-r7qtk\" (UID: \"56946fea-4fb3-4365-adfe-42e4a646c881\") " pod="openshift-authentication/oauth-openshift-784cdd6c44-r7qtk" Jan 29 12:12:40 crc kubenswrapper[4753]: I0129 12:12:40.316251 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fnksk\" (UniqueName: \"kubernetes.io/projected/56946fea-4fb3-4365-adfe-42e4a646c881-kube-api-access-fnksk\") pod \"oauth-openshift-784cdd6c44-r7qtk\" (UID: \"56946fea-4fb3-4365-adfe-42e4a646c881\") " pod="openshift-authentication/oauth-openshift-784cdd6c44-r7qtk" Jan 29 12:12:40 crc kubenswrapper[4753]: I0129 12:12:40.433054 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-784cdd6c44-r7qtk" Jan 29 12:12:40 crc kubenswrapper[4753]: I0129 12:12:40.567699 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn" event={"ID":"9728fd7e-6203-4082-9297-2d3fd9e17b74","Type":"ContainerDied","Data":"5ac04fdbe3e212ddf84391c573f7ca6c55e9e5c8d511b680b826e710c41b0856"} Jan 29 12:12:40 crc kubenswrapper[4753]: I0129 12:12:40.567775 4753 scope.go:117] "RemoveContainer" containerID="f00710772f44f4304d4013d4a1a5bcd7a1c5f1eef491b88d83a683fe1f1522f1" Jan 29 12:12:40 crc kubenswrapper[4753]: I0129 12:12:40.567940 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-zdhjn" Jan 29 12:12:40 crc kubenswrapper[4753]: I0129 12:12:40.596688 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-zdhjn"] Jan 29 12:12:40 crc kubenswrapper[4753]: I0129 12:12:40.607599 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-zdhjn"] Jan 29 12:12:40 crc kubenswrapper[4753]: I0129 12:12:40.875981 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-784cdd6c44-r7qtk"] Jan 29 12:12:41 crc kubenswrapper[4753]: I0129 12:12:41.577821 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-784cdd6c44-r7qtk" event={"ID":"56946fea-4fb3-4365-adfe-42e4a646c881","Type":"ContainerStarted","Data":"0e9b7d0ae790e00613a9aa7a45f6a626057e73cc5cd615dbd45595a339604a22"} Jan 29 12:12:41 crc kubenswrapper[4753]: I0129 12:12:41.897658 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9728fd7e-6203-4082-9297-2d3fd9e17b74" path="/var/lib/kubelet/pods/9728fd7e-6203-4082-9297-2d3fd9e17b74/volumes" Jan 29 12:12:42 crc kubenswrapper[4753]: I0129 12:12:42.586507 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-784cdd6c44-r7qtk" event={"ID":"56946fea-4fb3-4365-adfe-42e4a646c881","Type":"ContainerStarted","Data":"f0e9ee8c2d3d2d1827c53eeb7ac393ca94be6748152c77e9d3f60b5dc7ae8cdf"} Jan 29 12:12:42 crc kubenswrapper[4753]: I0129 12:12:42.587066 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-784cdd6c44-r7qtk" Jan 29 12:12:42 crc kubenswrapper[4753]: I0129 12:12:42.593332 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-784cdd6c44-r7qtk" Jan 29 12:12:42 crc kubenswrapper[4753]: I0129 12:12:42.617140 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-784cdd6c44-r7qtk" podStartSLOduration=29.617111167 podStartE2EDuration="29.617111167s" podCreationTimestamp="2026-01-29 12:12:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:12:42.608691194 +0000 UTC m=+376.860772669" watchObservedRunningTime="2026-01-29 12:12:42.617111167 +0000 UTC m=+376.869192622" Jan 29 12:12:59 crc kubenswrapper[4753]: I0129 12:12:59.252555 4753 patch_prober.go:28] interesting pod/machine-config-daemon-7c24x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 12:12:59 crc kubenswrapper[4753]: I0129 12:12:59.255284 4753 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 12:13:23 crc kubenswrapper[4753]: I0129 12:13:23.883174 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-pcxth"] Jan 29 12:13:23 crc kubenswrapper[4753]: I0129 12:13:23.891101 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-pcxth" podUID="225f75d2-06ff-4a8e-ad48-5fb73aba9a5f" containerName="registry-server" containerID="cri-o://2338eccd74c61d27c2d4f9866a4563469740efcc951b65081239082b950b42a8" gracePeriod=30 Jan 29 12:13:23 crc kubenswrapper[4753]: I0129 12:13:23.902751 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-t89jd"] Jan 29 12:13:23 crc kubenswrapper[4753]: I0129 12:13:23.905013 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-lswml"] Jan 29 12:13:23 crc kubenswrapper[4753]: I0129 12:13:23.905986 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-t89jd" podUID="ce6846a0-6c85-4ae9-afae-b10ead46d21d" containerName="registry-server" containerID="cri-o://84bbad5f70d02ec38037634220f255a1473f68ffd747febf707eb34e1ea997e8" gracePeriod=30 Jan 29 12:13:23 crc kubenswrapper[4753]: I0129 12:13:23.905469 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-lswml" podUID="dad90295-65db-470c-8041-19fcf86d0439" containerName="marketplace-operator" containerID="cri-o://771657a3800bbb3d40ea73f6e835cbb5b06aec1354fbf2aecd2eb76a6d3170f2" gracePeriod=30 Jan 29 12:13:24 crc kubenswrapper[4753]: I0129 12:13:24.090963 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-b52ds"] Jan 29 12:13:24 crc kubenswrapper[4753]: I0129 12:13:24.098662 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-b52ds" podUID="d818fc13-9863-4172-a818-4e01af393842" containerName="registry-server" containerID="cri-o://b67b50d642d7d5f59a4bf6aad1af7773ac9559c76201c8079ec071a120ab7a49" gracePeriod=30 Jan 29 12:13:24 crc kubenswrapper[4753]: I0129 12:13:24.117105 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-dgs2s"] Jan 29 12:13:24 crc kubenswrapper[4753]: I0129 12:13:24.117719 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-dgs2s" podUID="13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6" containerName="registry-server" containerID="cri-o://319898d3a1c9621b0384b4edd10b29f9300cc14aea765872df4951cdad8b3e01" gracePeriod=30 Jan 29 12:13:24 crc kubenswrapper[4753]: I0129 12:13:24.135042 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-kmlbh"] Jan 29 12:13:24 crc kubenswrapper[4753]: I0129 12:13:24.145753 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-kmlbh" Jan 29 12:13:24 crc kubenswrapper[4753]: I0129 12:13:24.147336 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-kmlbh"] Jan 29 12:13:24 crc kubenswrapper[4753]: I0129 12:13:24.187799 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qtkfv\" (UniqueName: \"kubernetes.io/projected/8c12a149-f214-43c8-a40d-57c615cbe69e-kube-api-access-qtkfv\") pod \"marketplace-operator-79b997595-kmlbh\" (UID: \"8c12a149-f214-43c8-a40d-57c615cbe69e\") " pod="openshift-marketplace/marketplace-operator-79b997595-kmlbh" Jan 29 12:13:24 crc kubenswrapper[4753]: I0129 12:13:24.187864 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8c12a149-f214-43c8-a40d-57c615cbe69e-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-kmlbh\" (UID: \"8c12a149-f214-43c8-a40d-57c615cbe69e\") " pod="openshift-marketplace/marketplace-operator-79b997595-kmlbh" Jan 29 12:13:24 crc kubenswrapper[4753]: I0129 12:13:24.187901 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/8c12a149-f214-43c8-a40d-57c615cbe69e-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-kmlbh\" (UID: \"8c12a149-f214-43c8-a40d-57c615cbe69e\") " pod="openshift-marketplace/marketplace-operator-79b997595-kmlbh" Jan 29 12:13:24 crc kubenswrapper[4753]: I0129 12:13:24.289115 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/8c12a149-f214-43c8-a40d-57c615cbe69e-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-kmlbh\" (UID: \"8c12a149-f214-43c8-a40d-57c615cbe69e\") " pod="openshift-marketplace/marketplace-operator-79b997595-kmlbh" Jan 29 12:13:24 crc kubenswrapper[4753]: I0129 12:13:24.289210 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qtkfv\" (UniqueName: \"kubernetes.io/projected/8c12a149-f214-43c8-a40d-57c615cbe69e-kube-api-access-qtkfv\") pod \"marketplace-operator-79b997595-kmlbh\" (UID: \"8c12a149-f214-43c8-a40d-57c615cbe69e\") " pod="openshift-marketplace/marketplace-operator-79b997595-kmlbh" Jan 29 12:13:24 crc kubenswrapper[4753]: I0129 12:13:24.289248 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8c12a149-f214-43c8-a40d-57c615cbe69e-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-kmlbh\" (UID: \"8c12a149-f214-43c8-a40d-57c615cbe69e\") " pod="openshift-marketplace/marketplace-operator-79b997595-kmlbh" Jan 29 12:13:24 crc kubenswrapper[4753]: I0129 12:13:24.290994 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8c12a149-f214-43c8-a40d-57c615cbe69e-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-kmlbh\" (UID: \"8c12a149-f214-43c8-a40d-57c615cbe69e\") " pod="openshift-marketplace/marketplace-operator-79b997595-kmlbh" Jan 29 12:13:24 crc kubenswrapper[4753]: I0129 12:13:24.302130 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: 
\"kubernetes.io/secret/8c12a149-f214-43c8-a40d-57c615cbe69e-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-kmlbh\" (UID: \"8c12a149-f214-43c8-a40d-57c615cbe69e\") " pod="openshift-marketplace/marketplace-operator-79b997595-kmlbh" Jan 29 12:13:24 crc kubenswrapper[4753]: I0129 12:13:24.317035 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qtkfv\" (UniqueName: \"kubernetes.io/projected/8c12a149-f214-43c8-a40d-57c615cbe69e-kube-api-access-qtkfv\") pod \"marketplace-operator-79b997595-kmlbh\" (UID: \"8c12a149-f214-43c8-a40d-57c615cbe69e\") " pod="openshift-marketplace/marketplace-operator-79b997595-kmlbh" Jan 29 12:13:24 crc kubenswrapper[4753]: E0129 12:13:24.533934 4753 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 319898d3a1c9621b0384b4edd10b29f9300cc14aea765872df4951cdad8b3e01 is running failed: container process not found" containerID="319898d3a1c9621b0384b4edd10b29f9300cc14aea765872df4951cdad8b3e01" cmd=["grpc_health_probe","-addr=:50051"] Jan 29 12:13:24 crc kubenswrapper[4753]: E0129 12:13:24.543276 4753 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 319898d3a1c9621b0384b4edd10b29f9300cc14aea765872df4951cdad8b3e01 is running failed: container process not found" containerID="319898d3a1c9621b0384b4edd10b29f9300cc14aea765872df4951cdad8b3e01" cmd=["grpc_health_probe","-addr=:50051"] Jan 29 12:13:24 crc kubenswrapper[4753]: E0129 12:13:24.544242 4753 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 319898d3a1c9621b0384b4edd10b29f9300cc14aea765872df4951cdad8b3e01 is running failed: container process not found" containerID="319898d3a1c9621b0384b4edd10b29f9300cc14aea765872df4951cdad8b3e01" cmd=["grpc_health_probe","-addr=:50051"] Jan 29 12:13:24 crc kubenswrapper[4753]: E0129 12:13:24.544330 4753 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 319898d3a1c9621b0384b4edd10b29f9300cc14aea765872df4951cdad8b3e01 is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/redhat-operators-dgs2s" podUID="13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6" containerName="registry-server" Jan 29 12:13:24 crc kubenswrapper[4753]: I0129 12:13:24.633453 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-kmlbh" Jan 29 12:13:24 crc kubenswrapper[4753]: I0129 12:13:24.643712 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-lswml" Jan 29 12:13:24 crc kubenswrapper[4753]: I0129 12:13:24.706645 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dgs2s" Jan 29 12:13:24 crc kubenswrapper[4753]: I0129 12:13:24.712644 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-pcxth" Jan 29 12:13:24 crc kubenswrapper[4753]: I0129 12:13:24.722417 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-t89jd" Jan 29 12:13:24 crc kubenswrapper[4753]: I0129 12:13:24.729383 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-b52ds" Jan 29 12:13:24 crc kubenswrapper[4753]: I0129 12:13:24.996861 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d818fc13-9863-4172-a818-4e01af393842-catalog-content\") pod \"d818fc13-9863-4172-a818-4e01af393842\" (UID: \"d818fc13-9863-4172-a818-4e01af393842\") " Jan 29 12:13:24 crc kubenswrapper[4753]: I0129 12:13:24.996912 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/dad90295-65db-470c-8041-19fcf86d0439-marketplace-operator-metrics\") pod \"dad90295-65db-470c-8041-19fcf86d0439\" (UID: \"dad90295-65db-470c-8041-19fcf86d0439\") " Jan 29 12:13:24 crc kubenswrapper[4753]: I0129 12:13:24.996963 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ce6846a0-6c85-4ae9-afae-b10ead46d21d-utilities\") pod \"ce6846a0-6c85-4ae9-afae-b10ead46d21d\" (UID: \"ce6846a0-6c85-4ae9-afae-b10ead46d21d\") " Jan 29 12:13:24 crc kubenswrapper[4753]: I0129 12:13:24.997002 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6-utilities\") pod \"13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6\" (UID: \"13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6\") " Jan 29 12:13:24 crc kubenswrapper[4753]: I0129 12:13:24.997047 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6-catalog-content\") pod \"13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6\" (UID: \"13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6\") " Jan 29 12:13:24 crc kubenswrapper[4753]: I0129 12:13:24.997077 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ce6846a0-6c85-4ae9-afae-b10ead46d21d-catalog-content\") pod \"ce6846a0-6c85-4ae9-afae-b10ead46d21d\" (UID: \"ce6846a0-6c85-4ae9-afae-b10ead46d21d\") " Jan 29 12:13:24 crc kubenswrapper[4753]: I0129 12:13:24.997107 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d818fc13-9863-4172-a818-4e01af393842-utilities\") pod \"d818fc13-9863-4172-a818-4e01af393842\" (UID: \"d818fc13-9863-4172-a818-4e01af393842\") " Jan 29 12:13:24 crc kubenswrapper[4753]: I0129 12:13:24.997136 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2bhr9\" (UniqueName: \"kubernetes.io/projected/d818fc13-9863-4172-a818-4e01af393842-kube-api-access-2bhr9\") pod \"d818fc13-9863-4172-a818-4e01af393842\" (UID: \"d818fc13-9863-4172-a818-4e01af393842\") " Jan 29 12:13:24 crc kubenswrapper[4753]: I0129 12:13:24.997185 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/dad90295-65db-470c-8041-19fcf86d0439-marketplace-trusted-ca\") pod \"dad90295-65db-470c-8041-19fcf86d0439\" (UID: \"dad90295-65db-470c-8041-19fcf86d0439\") " Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 
12:13:25.003264 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8t92k\" (UniqueName: \"kubernetes.io/projected/dad90295-65db-470c-8041-19fcf86d0439-kube-api-access-8t92k\") pod \"dad90295-65db-470c-8041-19fcf86d0439\" (UID: \"dad90295-65db-470c-8041-19fcf86d0439\") " Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.003383 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg8l8\" (UniqueName: \"kubernetes.io/projected/ce6846a0-6c85-4ae9-afae-b10ead46d21d-kube-api-access-mg8l8\") pod \"ce6846a0-6c85-4ae9-afae-b10ead46d21d\" (UID: \"ce6846a0-6c85-4ae9-afae-b10ead46d21d\") " Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.003414 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pbc79\" (UniqueName: \"kubernetes.io/projected/225f75d2-06ff-4a8e-ad48-5fb73aba9a5f-kube-api-access-pbc79\") pod \"225f75d2-06ff-4a8e-ad48-5fb73aba9a5f\" (UID: \"225f75d2-06ff-4a8e-ad48-5fb73aba9a5f\") " Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.003462 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t7r8k\" (UniqueName: \"kubernetes.io/projected/13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6-kube-api-access-t7r8k\") pod \"13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6\" (UID: \"13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6\") " Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.003502 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/225f75d2-06ff-4a8e-ad48-5fb73aba9a5f-catalog-content\") pod \"225f75d2-06ff-4a8e-ad48-5fb73aba9a5f\" (UID: \"225f75d2-06ff-4a8e-ad48-5fb73aba9a5f\") " Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.003532 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/225f75d2-06ff-4a8e-ad48-5fb73aba9a5f-utilities\") pod \"225f75d2-06ff-4a8e-ad48-5fb73aba9a5f\" (UID: \"225f75d2-06ff-4a8e-ad48-5fb73aba9a5f\") " Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.009677 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dad90295-65db-470c-8041-19fcf86d0439-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "dad90295-65db-470c-8041-19fcf86d0439" (UID: "dad90295-65db-470c-8041-19fcf86d0439"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.010410 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ce6846a0-6c85-4ae9-afae-b10ead46d21d-kube-api-access-mg8l8" (OuterVolumeSpecName: "kube-api-access-mg8l8") pod "ce6846a0-6c85-4ae9-afae-b10ead46d21d" (UID: "ce6846a0-6c85-4ae9-afae-b10ead46d21d"). InnerVolumeSpecName "kube-api-access-mg8l8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.010813 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/225f75d2-06ff-4a8e-ad48-5fb73aba9a5f-utilities" (OuterVolumeSpecName: "utilities") pod "225f75d2-06ff-4a8e-ad48-5fb73aba9a5f" (UID: "225f75d2-06ff-4a8e-ad48-5fb73aba9a5f"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.012178 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d818fc13-9863-4172-a818-4e01af393842-utilities" (OuterVolumeSpecName: "utilities") pod "d818fc13-9863-4172-a818-4e01af393842" (UID: "d818fc13-9863-4172-a818-4e01af393842"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.012309 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/225f75d2-06ff-4a8e-ad48-5fb73aba9a5f-kube-api-access-pbc79" (OuterVolumeSpecName: "kube-api-access-pbc79") pod "225f75d2-06ff-4a8e-ad48-5fb73aba9a5f" (UID: "225f75d2-06ff-4a8e-ad48-5fb73aba9a5f"). InnerVolumeSpecName "kube-api-access-pbc79". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.012440 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d818fc13-9863-4172-a818-4e01af393842-kube-api-access-2bhr9" (OuterVolumeSpecName: "kube-api-access-2bhr9") pod "d818fc13-9863-4172-a818-4e01af393842" (UID: "d818fc13-9863-4172-a818-4e01af393842"). InnerVolumeSpecName "kube-api-access-2bhr9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.013287 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ce6846a0-6c85-4ae9-afae-b10ead46d21d-utilities" (OuterVolumeSpecName: "utilities") pod "ce6846a0-6c85-4ae9-afae-b10ead46d21d" (UID: "ce6846a0-6c85-4ae9-afae-b10ead46d21d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.014604 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dad90295-65db-470c-8041-19fcf86d0439-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "dad90295-65db-470c-8041-19fcf86d0439" (UID: "dad90295-65db-470c-8041-19fcf86d0439"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.014783 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6-utilities" (OuterVolumeSpecName: "utilities") pod "13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6" (UID: "13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.017103 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dad90295-65db-470c-8041-19fcf86d0439-kube-api-access-8t92k" (OuterVolumeSpecName: "kube-api-access-8t92k") pod "dad90295-65db-470c-8041-19fcf86d0439" (UID: "dad90295-65db-470c-8041-19fcf86d0439"). InnerVolumeSpecName "kube-api-access-8t92k". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.021022 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6-kube-api-access-t7r8k" (OuterVolumeSpecName: "kube-api-access-t7r8k") pod "13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6" (UID: "13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6"). InnerVolumeSpecName "kube-api-access-t7r8k". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.085613 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d818fc13-9863-4172-a818-4e01af393842-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d818fc13-9863-4172-a818-4e01af393842" (UID: "d818fc13-9863-4172-a818-4e01af393842"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.108834 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t7r8k\" (UniqueName: \"kubernetes.io/projected/13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6-kube-api-access-t7r8k\") on node \"crc\" DevicePath \"\"" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.108881 4753 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/225f75d2-06ff-4a8e-ad48-5fb73aba9a5f-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.108895 4753 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d818fc13-9863-4172-a818-4e01af393842-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.108904 4753 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/dad90295-65db-470c-8041-19fcf86d0439-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.108917 4753 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ce6846a0-6c85-4ae9-afae-b10ead46d21d-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.108925 4753 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.108934 4753 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d818fc13-9863-4172-a818-4e01af393842-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.108943 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2bhr9\" (UniqueName: \"kubernetes.io/projected/d818fc13-9863-4172-a818-4e01af393842-kube-api-access-2bhr9\") on node \"crc\" DevicePath \"\"" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.108952 4753 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/dad90295-65db-470c-8041-19fcf86d0439-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.108960 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8t92k\" 
(UniqueName: \"kubernetes.io/projected/dad90295-65db-470c-8041-19fcf86d0439-kube-api-access-8t92k\") on node \"crc\" DevicePath \"\"" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.108969 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg8l8\" (UniqueName: \"kubernetes.io/projected/ce6846a0-6c85-4ae9-afae-b10ead46d21d-kube-api-access-mg8l8\") on node \"crc\" DevicePath \"\"" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.108979 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pbc79\" (UniqueName: \"kubernetes.io/projected/225f75d2-06ff-4a8e-ad48-5fb73aba9a5f-kube-api-access-pbc79\") on node \"crc\" DevicePath \"\"" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.119701 4753 generic.go:334] "Generic (PLEG): container finished" podID="d818fc13-9863-4172-a818-4e01af393842" containerID="b67b50d642d7d5f59a4bf6aad1af7773ac9559c76201c8079ec071a120ab7a49" exitCode=0 Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.119793 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b52ds" event={"ID":"d818fc13-9863-4172-a818-4e01af393842","Type":"ContainerDied","Data":"b67b50d642d7d5f59a4bf6aad1af7773ac9559c76201c8079ec071a120ab7a49"} Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.119844 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b52ds" event={"ID":"d818fc13-9863-4172-a818-4e01af393842","Type":"ContainerDied","Data":"5e5726067fab07574a284c6283182c0aebd5cf06746b30d83cdb1ab5d1846519"} Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.119908 4753 scope.go:117] "RemoveContainer" containerID="b67b50d642d7d5f59a4bf6aad1af7773ac9559c76201c8079ec071a120ab7a49" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.120108 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-b52ds" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.122774 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ce6846a0-6c85-4ae9-afae-b10ead46d21d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ce6846a0-6c85-4ae9-afae-b10ead46d21d" (UID: "ce6846a0-6c85-4ae9-afae-b10ead46d21d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.128451 4753 generic.go:334] "Generic (PLEG): container finished" podID="225f75d2-06ff-4a8e-ad48-5fb73aba9a5f" containerID="2338eccd74c61d27c2d4f9866a4563469740efcc951b65081239082b950b42a8" exitCode=0 Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.128516 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-pcxth" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.128583 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pcxth" event={"ID":"225f75d2-06ff-4a8e-ad48-5fb73aba9a5f","Type":"ContainerDied","Data":"2338eccd74c61d27c2d4f9866a4563469740efcc951b65081239082b950b42a8"} Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.128650 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pcxth" event={"ID":"225f75d2-06ff-4a8e-ad48-5fb73aba9a5f","Type":"ContainerDied","Data":"f007ee8d1dc94fa1e56a9e37455b63b58c720074b701d7e1ca3eb21b5f74b718"} Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.128808 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/225f75d2-06ff-4a8e-ad48-5fb73aba9a5f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "225f75d2-06ff-4a8e-ad48-5fb73aba9a5f" (UID: "225f75d2-06ff-4a8e-ad48-5fb73aba9a5f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.132908 4753 generic.go:334] "Generic (PLEG): container finished" podID="ce6846a0-6c85-4ae9-afae-b10ead46d21d" containerID="84bbad5f70d02ec38037634220f255a1473f68ffd747febf707eb34e1ea997e8" exitCode=0 Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.133001 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-t89jd" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.133045 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t89jd" event={"ID":"ce6846a0-6c85-4ae9-afae-b10ead46d21d","Type":"ContainerDied","Data":"84bbad5f70d02ec38037634220f255a1473f68ffd747febf707eb34e1ea997e8"} Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.133086 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t89jd" event={"ID":"ce6846a0-6c85-4ae9-afae-b10ead46d21d","Type":"ContainerDied","Data":"b30f9f9be4fa57fff95b3cd069500f85e18b2e954bc7dd46bc749f91ee31c64c"} Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.138214 4753 generic.go:334] "Generic (PLEG): container finished" podID="dad90295-65db-470c-8041-19fcf86d0439" containerID="771657a3800bbb3d40ea73f6e835cbb5b06aec1354fbf2aecd2eb76a6d3170f2" exitCode=0 Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.138309 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-lswml" event={"ID":"dad90295-65db-470c-8041-19fcf86d0439","Type":"ContainerDied","Data":"771657a3800bbb3d40ea73f6e835cbb5b06aec1354fbf2aecd2eb76a6d3170f2"} Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.138347 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-lswml" event={"ID":"dad90295-65db-470c-8041-19fcf86d0439","Type":"ContainerDied","Data":"d279b84d2d4a48ef2632e77a7739bcae1f51874d8c70cae02fb31bafc0fb0f34"} Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.138422 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-lswml" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.142893 4753 generic.go:334] "Generic (PLEG): container finished" podID="13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6" containerID="319898d3a1c9621b0384b4edd10b29f9300cc14aea765872df4951cdad8b3e01" exitCode=0 Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.142942 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dgs2s" event={"ID":"13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6","Type":"ContainerDied","Data":"319898d3a1c9621b0384b4edd10b29f9300cc14aea765872df4951cdad8b3e01"} Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.142972 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dgs2s" event={"ID":"13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6","Type":"ContainerDied","Data":"ce2df2f05c3545fe8420be669b7d215a797e028d4ba03e059c7db276b0b06280"} Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.143041 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dgs2s" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.152742 4753 scope.go:117] "RemoveContainer" containerID="5f1e9c6fe1e30e2b371f1da5742b90fcbdfdddff41df4ea5c51ceaac7f7afef8" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.166184 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6" (UID: "13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.189785 4753 scope.go:117] "RemoveContainer" containerID="33edee0526bd35ce082b98012b2a099d3dac7fdf927c548b0acc82f4b3237602" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.193647 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-b52ds"] Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.200294 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-b52ds"] Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.206800 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-kmlbh"] Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.209994 4753 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/225f75d2-06ff-4a8e-ad48-5fb73aba9a5f-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.210338 4753 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.210359 4753 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ce6846a0-6c85-4ae9-afae-b10ead46d21d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.222641 4753 scope.go:117] "RemoveContainer" containerID="b67b50d642d7d5f59a4bf6aad1af7773ac9559c76201c8079ec071a120ab7a49" Jan 29 12:13:25 crc kubenswrapper[4753]: E0129 
12:13:25.223500 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b67b50d642d7d5f59a4bf6aad1af7773ac9559c76201c8079ec071a120ab7a49\": container with ID starting with b67b50d642d7d5f59a4bf6aad1af7773ac9559c76201c8079ec071a120ab7a49 not found: ID does not exist" containerID="b67b50d642d7d5f59a4bf6aad1af7773ac9559c76201c8079ec071a120ab7a49" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.223571 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b67b50d642d7d5f59a4bf6aad1af7773ac9559c76201c8079ec071a120ab7a49"} err="failed to get container status \"b67b50d642d7d5f59a4bf6aad1af7773ac9559c76201c8079ec071a120ab7a49\": rpc error: code = NotFound desc = could not find container \"b67b50d642d7d5f59a4bf6aad1af7773ac9559c76201c8079ec071a120ab7a49\": container with ID starting with b67b50d642d7d5f59a4bf6aad1af7773ac9559c76201c8079ec071a120ab7a49 not found: ID does not exist" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.223612 4753 scope.go:117] "RemoveContainer" containerID="5f1e9c6fe1e30e2b371f1da5742b90fcbdfdddff41df4ea5c51ceaac7f7afef8" Jan 29 12:13:25 crc kubenswrapper[4753]: E0129 12:13:25.224400 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5f1e9c6fe1e30e2b371f1da5742b90fcbdfdddff41df4ea5c51ceaac7f7afef8\": container with ID starting with 5f1e9c6fe1e30e2b371f1da5742b90fcbdfdddff41df4ea5c51ceaac7f7afef8 not found: ID does not exist" containerID="5f1e9c6fe1e30e2b371f1da5742b90fcbdfdddff41df4ea5c51ceaac7f7afef8" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.224465 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5f1e9c6fe1e30e2b371f1da5742b90fcbdfdddff41df4ea5c51ceaac7f7afef8"} err="failed to get container status \"5f1e9c6fe1e30e2b371f1da5742b90fcbdfdddff41df4ea5c51ceaac7f7afef8\": rpc error: code = NotFound desc = could not find container \"5f1e9c6fe1e30e2b371f1da5742b90fcbdfdddff41df4ea5c51ceaac7f7afef8\": container with ID starting with 5f1e9c6fe1e30e2b371f1da5742b90fcbdfdddff41df4ea5c51ceaac7f7afef8 not found: ID does not exist" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.224527 4753 scope.go:117] "RemoveContainer" containerID="33edee0526bd35ce082b98012b2a099d3dac7fdf927c548b0acc82f4b3237602" Jan 29 12:13:25 crc kubenswrapper[4753]: E0129 12:13:25.224979 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"33edee0526bd35ce082b98012b2a099d3dac7fdf927c548b0acc82f4b3237602\": container with ID starting with 33edee0526bd35ce082b98012b2a099d3dac7fdf927c548b0acc82f4b3237602 not found: ID does not exist" containerID="33edee0526bd35ce082b98012b2a099d3dac7fdf927c548b0acc82f4b3237602" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.225052 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"33edee0526bd35ce082b98012b2a099d3dac7fdf927c548b0acc82f4b3237602"} err="failed to get container status \"33edee0526bd35ce082b98012b2a099d3dac7fdf927c548b0acc82f4b3237602\": rpc error: code = NotFound desc = could not find container \"33edee0526bd35ce082b98012b2a099d3dac7fdf927c548b0acc82f4b3237602\": container with ID starting with 33edee0526bd35ce082b98012b2a099d3dac7fdf927c548b0acc82f4b3237602 not found: ID does not exist" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.225096 4753 
scope.go:117] "RemoveContainer" containerID="2338eccd74c61d27c2d4f9866a4563469740efcc951b65081239082b950b42a8" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.229818 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-t89jd"] Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.238054 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-t89jd"] Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.247106 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-lswml"] Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.253250 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-lswml"] Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.300462 4753 scope.go:117] "RemoveContainer" containerID="5ea372da213825ce784805ec1157049fbc3a85f84ec64a4a69d94a53be8e9f93" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.330246 4753 scope.go:117] "RemoveContainer" containerID="4e5f7ed97e213c66f79f0331b2c0bcb3c9f1f860e531af2ef4c7ae4165b2eb97" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.353211 4753 scope.go:117] "RemoveContainer" containerID="2338eccd74c61d27c2d4f9866a4563469740efcc951b65081239082b950b42a8" Jan 29 12:13:25 crc kubenswrapper[4753]: E0129 12:13:25.353948 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2338eccd74c61d27c2d4f9866a4563469740efcc951b65081239082b950b42a8\": container with ID starting with 2338eccd74c61d27c2d4f9866a4563469740efcc951b65081239082b950b42a8 not found: ID does not exist" containerID="2338eccd74c61d27c2d4f9866a4563469740efcc951b65081239082b950b42a8" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.354000 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2338eccd74c61d27c2d4f9866a4563469740efcc951b65081239082b950b42a8"} err="failed to get container status \"2338eccd74c61d27c2d4f9866a4563469740efcc951b65081239082b950b42a8\": rpc error: code = NotFound desc = could not find container \"2338eccd74c61d27c2d4f9866a4563469740efcc951b65081239082b950b42a8\": container with ID starting with 2338eccd74c61d27c2d4f9866a4563469740efcc951b65081239082b950b42a8 not found: ID does not exist" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.354039 4753 scope.go:117] "RemoveContainer" containerID="5ea372da213825ce784805ec1157049fbc3a85f84ec64a4a69d94a53be8e9f93" Jan 29 12:13:25 crc kubenswrapper[4753]: E0129 12:13:25.354716 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5ea372da213825ce784805ec1157049fbc3a85f84ec64a4a69d94a53be8e9f93\": container with ID starting with 5ea372da213825ce784805ec1157049fbc3a85f84ec64a4a69d94a53be8e9f93 not found: ID does not exist" containerID="5ea372da213825ce784805ec1157049fbc3a85f84ec64a4a69d94a53be8e9f93" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.354844 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5ea372da213825ce784805ec1157049fbc3a85f84ec64a4a69d94a53be8e9f93"} err="failed to get container status \"5ea372da213825ce784805ec1157049fbc3a85f84ec64a4a69d94a53be8e9f93\": rpc error: code = NotFound desc = could not find container \"5ea372da213825ce784805ec1157049fbc3a85f84ec64a4a69d94a53be8e9f93\": container with ID 
starting with 5ea372da213825ce784805ec1157049fbc3a85f84ec64a4a69d94a53be8e9f93 not found: ID does not exist" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.354891 4753 scope.go:117] "RemoveContainer" containerID="4e5f7ed97e213c66f79f0331b2c0bcb3c9f1f860e531af2ef4c7ae4165b2eb97" Jan 29 12:13:25 crc kubenswrapper[4753]: E0129 12:13:25.355391 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4e5f7ed97e213c66f79f0331b2c0bcb3c9f1f860e531af2ef4c7ae4165b2eb97\": container with ID starting with 4e5f7ed97e213c66f79f0331b2c0bcb3c9f1f860e531af2ef4c7ae4165b2eb97 not found: ID does not exist" containerID="4e5f7ed97e213c66f79f0331b2c0bcb3c9f1f860e531af2ef4c7ae4165b2eb97" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.355463 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4e5f7ed97e213c66f79f0331b2c0bcb3c9f1f860e531af2ef4c7ae4165b2eb97"} err="failed to get container status \"4e5f7ed97e213c66f79f0331b2c0bcb3c9f1f860e531af2ef4c7ae4165b2eb97\": rpc error: code = NotFound desc = could not find container \"4e5f7ed97e213c66f79f0331b2c0bcb3c9f1f860e531af2ef4c7ae4165b2eb97\": container with ID starting with 4e5f7ed97e213c66f79f0331b2c0bcb3c9f1f860e531af2ef4c7ae4165b2eb97 not found: ID does not exist" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.355491 4753 scope.go:117] "RemoveContainer" containerID="84bbad5f70d02ec38037634220f255a1473f68ffd747febf707eb34e1ea997e8" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.390671 4753 scope.go:117] "RemoveContainer" containerID="82b49ac4e60915453bc7c1f7c8e1ba68a6c643163b8f03b8eca0416b5c29ea2c" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.428975 4753 scope.go:117] "RemoveContainer" containerID="722ecd34f28ac3858447e086a8206c6587de0c3b85a51c6563402fa891748423" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.446110 4753 scope.go:117] "RemoveContainer" containerID="84bbad5f70d02ec38037634220f255a1473f68ffd747febf707eb34e1ea997e8" Jan 29 12:13:25 crc kubenswrapper[4753]: E0129 12:13:25.446743 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"84bbad5f70d02ec38037634220f255a1473f68ffd747febf707eb34e1ea997e8\": container with ID starting with 84bbad5f70d02ec38037634220f255a1473f68ffd747febf707eb34e1ea997e8 not found: ID does not exist" containerID="84bbad5f70d02ec38037634220f255a1473f68ffd747febf707eb34e1ea997e8" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.446809 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"84bbad5f70d02ec38037634220f255a1473f68ffd747febf707eb34e1ea997e8"} err="failed to get container status \"84bbad5f70d02ec38037634220f255a1473f68ffd747febf707eb34e1ea997e8\": rpc error: code = NotFound desc = could not find container \"84bbad5f70d02ec38037634220f255a1473f68ffd747febf707eb34e1ea997e8\": container with ID starting with 84bbad5f70d02ec38037634220f255a1473f68ffd747febf707eb34e1ea997e8 not found: ID does not exist" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.446870 4753 scope.go:117] "RemoveContainer" containerID="82b49ac4e60915453bc7c1f7c8e1ba68a6c643163b8f03b8eca0416b5c29ea2c" Jan 29 12:13:25 crc kubenswrapper[4753]: E0129 12:13:25.447393 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"82b49ac4e60915453bc7c1f7c8e1ba68a6c643163b8f03b8eca0416b5c29ea2c\": container with ID starting with 82b49ac4e60915453bc7c1f7c8e1ba68a6c643163b8f03b8eca0416b5c29ea2c not found: ID does not exist" containerID="82b49ac4e60915453bc7c1f7c8e1ba68a6c643163b8f03b8eca0416b5c29ea2c" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.447449 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"82b49ac4e60915453bc7c1f7c8e1ba68a6c643163b8f03b8eca0416b5c29ea2c"} err="failed to get container status \"82b49ac4e60915453bc7c1f7c8e1ba68a6c643163b8f03b8eca0416b5c29ea2c\": rpc error: code = NotFound desc = could not find container \"82b49ac4e60915453bc7c1f7c8e1ba68a6c643163b8f03b8eca0416b5c29ea2c\": container with ID starting with 82b49ac4e60915453bc7c1f7c8e1ba68a6c643163b8f03b8eca0416b5c29ea2c not found: ID does not exist" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.447474 4753 scope.go:117] "RemoveContainer" containerID="722ecd34f28ac3858447e086a8206c6587de0c3b85a51c6563402fa891748423" Jan 29 12:13:25 crc kubenswrapper[4753]: E0129 12:13:25.447778 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"722ecd34f28ac3858447e086a8206c6587de0c3b85a51c6563402fa891748423\": container with ID starting with 722ecd34f28ac3858447e086a8206c6587de0c3b85a51c6563402fa891748423 not found: ID does not exist" containerID="722ecd34f28ac3858447e086a8206c6587de0c3b85a51c6563402fa891748423" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.447820 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"722ecd34f28ac3858447e086a8206c6587de0c3b85a51c6563402fa891748423"} err="failed to get container status \"722ecd34f28ac3858447e086a8206c6587de0c3b85a51c6563402fa891748423\": rpc error: code = NotFound desc = could not find container \"722ecd34f28ac3858447e086a8206c6587de0c3b85a51c6563402fa891748423\": container with ID starting with 722ecd34f28ac3858447e086a8206c6587de0c3b85a51c6563402fa891748423 not found: ID does not exist" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.447841 4753 scope.go:117] "RemoveContainer" containerID="771657a3800bbb3d40ea73f6e835cbb5b06aec1354fbf2aecd2eb76a6d3170f2" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.464383 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-pcxth"] Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.469071 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-pcxth"] Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.480849 4753 scope.go:117] "RemoveContainer" containerID="3edfd10e3371577561cb5294e3223259b5cdccc67802061c5edac4bd5dea3ee0" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.513852 4753 scope.go:117] "RemoveContainer" containerID="771657a3800bbb3d40ea73f6e835cbb5b06aec1354fbf2aecd2eb76a6d3170f2" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.514620 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-dgs2s"] Jan 29 12:13:25 crc kubenswrapper[4753]: E0129 12:13:25.515472 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"771657a3800bbb3d40ea73f6e835cbb5b06aec1354fbf2aecd2eb76a6d3170f2\": container with ID starting with 771657a3800bbb3d40ea73f6e835cbb5b06aec1354fbf2aecd2eb76a6d3170f2 not found: ID does not exist" 
containerID="771657a3800bbb3d40ea73f6e835cbb5b06aec1354fbf2aecd2eb76a6d3170f2" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.515513 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"771657a3800bbb3d40ea73f6e835cbb5b06aec1354fbf2aecd2eb76a6d3170f2"} err="failed to get container status \"771657a3800bbb3d40ea73f6e835cbb5b06aec1354fbf2aecd2eb76a6d3170f2\": rpc error: code = NotFound desc = could not find container \"771657a3800bbb3d40ea73f6e835cbb5b06aec1354fbf2aecd2eb76a6d3170f2\": container with ID starting with 771657a3800bbb3d40ea73f6e835cbb5b06aec1354fbf2aecd2eb76a6d3170f2 not found: ID does not exist" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.515543 4753 scope.go:117] "RemoveContainer" containerID="3edfd10e3371577561cb5294e3223259b5cdccc67802061c5edac4bd5dea3ee0" Jan 29 12:13:25 crc kubenswrapper[4753]: E0129 12:13:25.516609 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3edfd10e3371577561cb5294e3223259b5cdccc67802061c5edac4bd5dea3ee0\": container with ID starting with 3edfd10e3371577561cb5294e3223259b5cdccc67802061c5edac4bd5dea3ee0 not found: ID does not exist" containerID="3edfd10e3371577561cb5294e3223259b5cdccc67802061c5edac4bd5dea3ee0" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.516651 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3edfd10e3371577561cb5294e3223259b5cdccc67802061c5edac4bd5dea3ee0"} err="failed to get container status \"3edfd10e3371577561cb5294e3223259b5cdccc67802061c5edac4bd5dea3ee0\": rpc error: code = NotFound desc = could not find container \"3edfd10e3371577561cb5294e3223259b5cdccc67802061c5edac4bd5dea3ee0\": container with ID starting with 3edfd10e3371577561cb5294e3223259b5cdccc67802061c5edac4bd5dea3ee0 not found: ID does not exist" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.516672 4753 scope.go:117] "RemoveContainer" containerID="319898d3a1c9621b0384b4edd10b29f9300cc14aea765872df4951cdad8b3e01" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.535903 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-dgs2s"] Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.538169 4753 scope.go:117] "RemoveContainer" containerID="94ba13ac0c6370a35cda4438762c0ee8998bc9580dfd603301d8a0ac86f5f218" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.563765 4753 scope.go:117] "RemoveContainer" containerID="bf1c8d45dc92bd08c960afa54a9a560ff456e1f48c9f29d50cfc4b5575df807c" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.646024 4753 scope.go:117] "RemoveContainer" containerID="319898d3a1c9621b0384b4edd10b29f9300cc14aea765872df4951cdad8b3e01" Jan 29 12:13:25 crc kubenswrapper[4753]: E0129 12:13:25.677638 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"319898d3a1c9621b0384b4edd10b29f9300cc14aea765872df4951cdad8b3e01\": container with ID starting with 319898d3a1c9621b0384b4edd10b29f9300cc14aea765872df4951cdad8b3e01 not found: ID does not exist" containerID="319898d3a1c9621b0384b4edd10b29f9300cc14aea765872df4951cdad8b3e01" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.678085 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"319898d3a1c9621b0384b4edd10b29f9300cc14aea765872df4951cdad8b3e01"} err="failed to get container status 
\"319898d3a1c9621b0384b4edd10b29f9300cc14aea765872df4951cdad8b3e01\": rpc error: code = NotFound desc = could not find container \"319898d3a1c9621b0384b4edd10b29f9300cc14aea765872df4951cdad8b3e01\": container with ID starting with 319898d3a1c9621b0384b4edd10b29f9300cc14aea765872df4951cdad8b3e01 not found: ID does not exist" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.678363 4753 scope.go:117] "RemoveContainer" containerID="94ba13ac0c6370a35cda4438762c0ee8998bc9580dfd603301d8a0ac86f5f218" Jan 29 12:13:25 crc kubenswrapper[4753]: E0129 12:13:25.692480 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"94ba13ac0c6370a35cda4438762c0ee8998bc9580dfd603301d8a0ac86f5f218\": container with ID starting with 94ba13ac0c6370a35cda4438762c0ee8998bc9580dfd603301d8a0ac86f5f218 not found: ID does not exist" containerID="94ba13ac0c6370a35cda4438762c0ee8998bc9580dfd603301d8a0ac86f5f218" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.692977 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"94ba13ac0c6370a35cda4438762c0ee8998bc9580dfd603301d8a0ac86f5f218"} err="failed to get container status \"94ba13ac0c6370a35cda4438762c0ee8998bc9580dfd603301d8a0ac86f5f218\": rpc error: code = NotFound desc = could not find container \"94ba13ac0c6370a35cda4438762c0ee8998bc9580dfd603301d8a0ac86f5f218\": container with ID starting with 94ba13ac0c6370a35cda4438762c0ee8998bc9580dfd603301d8a0ac86f5f218 not found: ID does not exist" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.693161 4753 scope.go:117] "RemoveContainer" containerID="bf1c8d45dc92bd08c960afa54a9a560ff456e1f48c9f29d50cfc4b5575df807c" Jan 29 12:13:25 crc kubenswrapper[4753]: E0129 12:13:25.696880 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bf1c8d45dc92bd08c960afa54a9a560ff456e1f48c9f29d50cfc4b5575df807c\": container with ID starting with bf1c8d45dc92bd08c960afa54a9a560ff456e1f48c9f29d50cfc4b5575df807c not found: ID does not exist" containerID="bf1c8d45dc92bd08c960afa54a9a560ff456e1f48c9f29d50cfc4b5575df807c" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.696986 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bf1c8d45dc92bd08c960afa54a9a560ff456e1f48c9f29d50cfc4b5575df807c"} err="failed to get container status \"bf1c8d45dc92bd08c960afa54a9a560ff456e1f48c9f29d50cfc4b5575df807c\": rpc error: code = NotFound desc = could not find container \"bf1c8d45dc92bd08c960afa54a9a560ff456e1f48c9f29d50cfc4b5575df807c\": container with ID starting with bf1c8d45dc92bd08c960afa54a9a560ff456e1f48c9f29d50cfc4b5575df807c not found: ID does not exist" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.844328 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-52zgz"] Jan 29 12:13:25 crc kubenswrapper[4753]: E0129 12:13:25.844665 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="225f75d2-06ff-4a8e-ad48-5fb73aba9a5f" containerName="extract-utilities" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.844700 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="225f75d2-06ff-4a8e-ad48-5fb73aba9a5f" containerName="extract-utilities" Jan 29 12:13:25 crc kubenswrapper[4753]: E0129 12:13:25.844719 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d818fc13-9863-4172-a818-4e01af393842" 
containerName="registry-server" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.844727 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="d818fc13-9863-4172-a818-4e01af393842" containerName="registry-server" Jan 29 12:13:25 crc kubenswrapper[4753]: E0129 12:13:25.844737 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6" containerName="registry-server" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.844745 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6" containerName="registry-server" Jan 29 12:13:25 crc kubenswrapper[4753]: E0129 12:13:25.844755 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce6846a0-6c85-4ae9-afae-b10ead46d21d" containerName="extract-utilities" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.844763 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce6846a0-6c85-4ae9-afae-b10ead46d21d" containerName="extract-utilities" Jan 29 12:13:25 crc kubenswrapper[4753]: E0129 12:13:25.844773 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dad90295-65db-470c-8041-19fcf86d0439" containerName="marketplace-operator" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.844783 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="dad90295-65db-470c-8041-19fcf86d0439" containerName="marketplace-operator" Jan 29 12:13:25 crc kubenswrapper[4753]: E0129 12:13:25.844792 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce6846a0-6c85-4ae9-afae-b10ead46d21d" containerName="registry-server" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.844800 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce6846a0-6c85-4ae9-afae-b10ead46d21d" containerName="registry-server" Jan 29 12:13:25 crc kubenswrapper[4753]: E0129 12:13:25.844813 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="225f75d2-06ff-4a8e-ad48-5fb73aba9a5f" containerName="registry-server" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.844821 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="225f75d2-06ff-4a8e-ad48-5fb73aba9a5f" containerName="registry-server" Jan 29 12:13:25 crc kubenswrapper[4753]: E0129 12:13:25.844832 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d818fc13-9863-4172-a818-4e01af393842" containerName="extract-utilities" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.844840 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="d818fc13-9863-4172-a818-4e01af393842" containerName="extract-utilities" Jan 29 12:13:25 crc kubenswrapper[4753]: E0129 12:13:25.844849 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dad90295-65db-470c-8041-19fcf86d0439" containerName="marketplace-operator" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.844856 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="dad90295-65db-470c-8041-19fcf86d0439" containerName="marketplace-operator" Jan 29 12:13:25 crc kubenswrapper[4753]: E0129 12:13:25.856695 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6" containerName="extract-content" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.856756 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6" containerName="extract-content" Jan 29 12:13:25 crc kubenswrapper[4753]: E0129 12:13:25.856801 4753 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="225f75d2-06ff-4a8e-ad48-5fb73aba9a5f" containerName="extract-content" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.856807 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="225f75d2-06ff-4a8e-ad48-5fb73aba9a5f" containerName="extract-content" Jan 29 12:13:25 crc kubenswrapper[4753]: E0129 12:13:25.856820 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6" containerName="extract-utilities" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.856826 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6" containerName="extract-utilities" Jan 29 12:13:25 crc kubenswrapper[4753]: E0129 12:13:25.856848 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce6846a0-6c85-4ae9-afae-b10ead46d21d" containerName="extract-content" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.856854 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce6846a0-6c85-4ae9-afae-b10ead46d21d" containerName="extract-content" Jan 29 12:13:25 crc kubenswrapper[4753]: E0129 12:13:25.856868 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d818fc13-9863-4172-a818-4e01af393842" containerName="extract-content" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.856875 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="d818fc13-9863-4172-a818-4e01af393842" containerName="extract-content" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.857247 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="dad90295-65db-470c-8041-19fcf86d0439" containerName="marketplace-operator" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.857272 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="ce6846a0-6c85-4ae9-afae-b10ead46d21d" containerName="registry-server" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.857293 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6" containerName="registry-server" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.857312 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="225f75d2-06ff-4a8e-ad48-5fb73aba9a5f" containerName="registry-server" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.857328 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="d818fc13-9863-4172-a818-4e01af393842" containerName="registry-server" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.857603 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="dad90295-65db-470c-8041-19fcf86d0439" containerName="marketplace-operator" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.858498 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-52zgz" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.866113 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.868109 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-52zgz"] Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.898030 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6" path="/var/lib/kubelet/pods/13c02ae6-2d17-4b39-9f64-4a7a99a6ffc6/volumes" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.898997 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="225f75d2-06ff-4a8e-ad48-5fb73aba9a5f" path="/var/lib/kubelet/pods/225f75d2-06ff-4a8e-ad48-5fb73aba9a5f/volumes" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.900548 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ce6846a0-6c85-4ae9-afae-b10ead46d21d" path="/var/lib/kubelet/pods/ce6846a0-6c85-4ae9-afae-b10ead46d21d/volumes" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.903016 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d818fc13-9863-4172-a818-4e01af393842" path="/var/lib/kubelet/pods/d818fc13-9863-4172-a818-4e01af393842/volumes" Jan 29 12:13:25 crc kubenswrapper[4753]: I0129 12:13:25.903763 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dad90295-65db-470c-8041-19fcf86d0439" path="/var/lib/kubelet/pods/dad90295-65db-470c-8041-19fcf86d0439/volumes" Jan 29 12:13:26 crc kubenswrapper[4753]: I0129 12:13:26.018188 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ea63566b-9a3b-4a69-aaba-18e93b24f5c3-utilities\") pod \"redhat-marketplace-52zgz\" (UID: \"ea63566b-9a3b-4a69-aaba-18e93b24f5c3\") " pod="openshift-marketplace/redhat-marketplace-52zgz" Jan 29 12:13:26 crc kubenswrapper[4753]: I0129 12:13:26.018272 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sczzc\" (UniqueName: \"kubernetes.io/projected/ea63566b-9a3b-4a69-aaba-18e93b24f5c3-kube-api-access-sczzc\") pod \"redhat-marketplace-52zgz\" (UID: \"ea63566b-9a3b-4a69-aaba-18e93b24f5c3\") " pod="openshift-marketplace/redhat-marketplace-52zgz" Jan 29 12:13:26 crc kubenswrapper[4753]: I0129 12:13:26.018306 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ea63566b-9a3b-4a69-aaba-18e93b24f5c3-catalog-content\") pod \"redhat-marketplace-52zgz\" (UID: \"ea63566b-9a3b-4a69-aaba-18e93b24f5c3\") " pod="openshift-marketplace/redhat-marketplace-52zgz" Jan 29 12:13:26 crc kubenswrapper[4753]: I0129 12:13:26.119733 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ea63566b-9a3b-4a69-aaba-18e93b24f5c3-utilities\") pod \"redhat-marketplace-52zgz\" (UID: \"ea63566b-9a3b-4a69-aaba-18e93b24f5c3\") " pod="openshift-marketplace/redhat-marketplace-52zgz" Jan 29 12:13:26 crc kubenswrapper[4753]: I0129 12:13:26.120102 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sczzc\" (UniqueName: 
\"kubernetes.io/projected/ea63566b-9a3b-4a69-aaba-18e93b24f5c3-kube-api-access-sczzc\") pod \"redhat-marketplace-52zgz\" (UID: \"ea63566b-9a3b-4a69-aaba-18e93b24f5c3\") " pod="openshift-marketplace/redhat-marketplace-52zgz" Jan 29 12:13:26 crc kubenswrapper[4753]: I0129 12:13:26.120126 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ea63566b-9a3b-4a69-aaba-18e93b24f5c3-catalog-content\") pod \"redhat-marketplace-52zgz\" (UID: \"ea63566b-9a3b-4a69-aaba-18e93b24f5c3\") " pod="openshift-marketplace/redhat-marketplace-52zgz" Jan 29 12:13:26 crc kubenswrapper[4753]: I0129 12:13:26.120836 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ea63566b-9a3b-4a69-aaba-18e93b24f5c3-utilities\") pod \"redhat-marketplace-52zgz\" (UID: \"ea63566b-9a3b-4a69-aaba-18e93b24f5c3\") " pod="openshift-marketplace/redhat-marketplace-52zgz" Jan 29 12:13:26 crc kubenswrapper[4753]: I0129 12:13:26.120900 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ea63566b-9a3b-4a69-aaba-18e93b24f5c3-catalog-content\") pod \"redhat-marketplace-52zgz\" (UID: \"ea63566b-9a3b-4a69-aaba-18e93b24f5c3\") " pod="openshift-marketplace/redhat-marketplace-52zgz" Jan 29 12:13:26 crc kubenswrapper[4753]: I0129 12:13:26.143736 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sczzc\" (UniqueName: \"kubernetes.io/projected/ea63566b-9a3b-4a69-aaba-18e93b24f5c3-kube-api-access-sczzc\") pod \"redhat-marketplace-52zgz\" (UID: \"ea63566b-9a3b-4a69-aaba-18e93b24f5c3\") " pod="openshift-marketplace/redhat-marketplace-52zgz" Jan 29 12:13:26 crc kubenswrapper[4753]: I0129 12:13:26.158844 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-kmlbh" event={"ID":"8c12a149-f214-43c8-a40d-57c615cbe69e","Type":"ContainerStarted","Data":"10f3d80438d1502044ec5efdf21a3089119a0a31c804004e1036867c005119d6"} Jan 29 12:13:26 crc kubenswrapper[4753]: I0129 12:13:26.158919 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-kmlbh" event={"ID":"8c12a149-f214-43c8-a40d-57c615cbe69e","Type":"ContainerStarted","Data":"ba154457da40481470d8051d47610f1fd6ebb62bbcc43f2d0d83e09642e22b6e"} Jan 29 12:13:26 crc kubenswrapper[4753]: I0129 12:13:26.185110 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-52zgz" Jan 29 12:13:26 crc kubenswrapper[4753]: I0129 12:13:26.198420 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-kmlbh" podStartSLOduration=3.198362562 podStartE2EDuration="3.198362562s" podCreationTimestamp="2026-01-29 12:13:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:13:26.18499264 +0000 UTC m=+420.437074095" watchObservedRunningTime="2026-01-29 12:13:26.198362562 +0000 UTC m=+420.450444017" Jan 29 12:13:26 crc kubenswrapper[4753]: I0129 12:13:26.615826 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-52zgz"] Jan 29 12:13:26 crc kubenswrapper[4753]: I0129 12:13:26.918586 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-892wz"] Jan 29 12:13:26 crc kubenswrapper[4753]: I0129 12:13:26.919897 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-892wz" Jan 29 12:13:26 crc kubenswrapper[4753]: I0129 12:13:26.924750 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Jan 29 12:13:26 crc kubenswrapper[4753]: I0129 12:13:26.941732 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-892wz"] Jan 29 12:13:27 crc kubenswrapper[4753]: I0129 12:13:27.093249 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/30b357a9-c353-4290-92a0-cbec35e161a0-utilities\") pod \"certified-operators-892wz\" (UID: \"30b357a9-c353-4290-92a0-cbec35e161a0\") " pod="openshift-marketplace/certified-operators-892wz" Jan 29 12:13:27 crc kubenswrapper[4753]: I0129 12:13:27.093647 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rgpld\" (UniqueName: \"kubernetes.io/projected/30b357a9-c353-4290-92a0-cbec35e161a0-kube-api-access-rgpld\") pod \"certified-operators-892wz\" (UID: \"30b357a9-c353-4290-92a0-cbec35e161a0\") " pod="openshift-marketplace/certified-operators-892wz" Jan 29 12:13:27 crc kubenswrapper[4753]: I0129 12:13:27.093696 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/30b357a9-c353-4290-92a0-cbec35e161a0-catalog-content\") pod \"certified-operators-892wz\" (UID: \"30b357a9-c353-4290-92a0-cbec35e161a0\") " pod="openshift-marketplace/certified-operators-892wz" Jan 29 12:13:27 crc kubenswrapper[4753]: I0129 12:13:27.175139 4753 generic.go:334] "Generic (PLEG): container finished" podID="ea63566b-9a3b-4a69-aaba-18e93b24f5c3" containerID="7a7ac936772e4aa5e46461c42700475df079fcf25347d6322b00a9f5d5c7bde6" exitCode=0 Jan 29 12:13:27 crc kubenswrapper[4753]: I0129 12:13:27.175297 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-52zgz" event={"ID":"ea63566b-9a3b-4a69-aaba-18e93b24f5c3","Type":"ContainerDied","Data":"7a7ac936772e4aa5e46461c42700475df079fcf25347d6322b00a9f5d5c7bde6"} Jan 29 12:13:27 crc kubenswrapper[4753]: I0129 12:13:27.175423 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-marketplace-52zgz" event={"ID":"ea63566b-9a3b-4a69-aaba-18e93b24f5c3","Type":"ContainerStarted","Data":"af6e137534f1741e1dcc42d42de64567cbcbc287e2a8fc23c496a00476a29c4b"} Jan 29 12:13:27 crc kubenswrapper[4753]: I0129 12:13:27.175689 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-kmlbh" Jan 29 12:13:27 crc kubenswrapper[4753]: I0129 12:13:27.181774 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-kmlbh" Jan 29 12:13:27 crc kubenswrapper[4753]: I0129 12:13:27.194958 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/30b357a9-c353-4290-92a0-cbec35e161a0-utilities\") pod \"certified-operators-892wz\" (UID: \"30b357a9-c353-4290-92a0-cbec35e161a0\") " pod="openshift-marketplace/certified-operators-892wz" Jan 29 12:13:27 crc kubenswrapper[4753]: I0129 12:13:27.195051 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rgpld\" (UniqueName: \"kubernetes.io/projected/30b357a9-c353-4290-92a0-cbec35e161a0-kube-api-access-rgpld\") pod \"certified-operators-892wz\" (UID: \"30b357a9-c353-4290-92a0-cbec35e161a0\") " pod="openshift-marketplace/certified-operators-892wz" Jan 29 12:13:27 crc kubenswrapper[4753]: I0129 12:13:27.195678 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/30b357a9-c353-4290-92a0-cbec35e161a0-utilities\") pod \"certified-operators-892wz\" (UID: \"30b357a9-c353-4290-92a0-cbec35e161a0\") " pod="openshift-marketplace/certified-operators-892wz" Jan 29 12:13:27 crc kubenswrapper[4753]: I0129 12:13:27.195850 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/30b357a9-c353-4290-92a0-cbec35e161a0-catalog-content\") pod \"certified-operators-892wz\" (UID: \"30b357a9-c353-4290-92a0-cbec35e161a0\") " pod="openshift-marketplace/certified-operators-892wz" Jan 29 12:13:27 crc kubenswrapper[4753]: I0129 12:13:27.196972 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/30b357a9-c353-4290-92a0-cbec35e161a0-catalog-content\") pod \"certified-operators-892wz\" (UID: \"30b357a9-c353-4290-92a0-cbec35e161a0\") " pod="openshift-marketplace/certified-operators-892wz" Jan 29 12:13:27 crc kubenswrapper[4753]: I0129 12:13:27.235584 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rgpld\" (UniqueName: \"kubernetes.io/projected/30b357a9-c353-4290-92a0-cbec35e161a0-kube-api-access-rgpld\") pod \"certified-operators-892wz\" (UID: \"30b357a9-c353-4290-92a0-cbec35e161a0\") " pod="openshift-marketplace/certified-operators-892wz" Jan 29 12:13:27 crc kubenswrapper[4753]: I0129 12:13:27.282826 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-892wz" Jan 29 12:13:27 crc kubenswrapper[4753]: I0129 12:13:27.699118 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-892wz"] Jan 29 12:13:27 crc kubenswrapper[4753]: W0129 12:13:27.708043 4753 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod30b357a9_c353_4290_92a0_cbec35e161a0.slice/crio-143a72ea219efd88967551e67561825f23760cb1a53a642313f2b21d18c7fc2d WatchSource:0}: Error finding container 143a72ea219efd88967551e67561825f23760cb1a53a642313f2b21d18c7fc2d: Status 404 returned error can't find the container with id 143a72ea219efd88967551e67561825f23760cb1a53a642313f2b21d18c7fc2d Jan 29 12:13:27 crc kubenswrapper[4753]: I0129 12:13:27.918210 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-5qfcb"] Jan 29 12:13:27 crc kubenswrapper[4753]: I0129 12:13:27.919512 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5qfcb" Jan 29 12:13:27 crc kubenswrapper[4753]: I0129 12:13:27.928354 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Jan 29 12:13:27 crc kubenswrapper[4753]: I0129 12:13:27.930252 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5qfcb"] Jan 29 12:13:28 crc kubenswrapper[4753]: I0129 12:13:28.011406 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b6c93ff-5f67-421a-beca-c1b588d8535d-catalog-content\") pod \"redhat-operators-5qfcb\" (UID: \"4b6c93ff-5f67-421a-beca-c1b588d8535d\") " pod="openshift-marketplace/redhat-operators-5qfcb" Jan 29 12:13:28 crc kubenswrapper[4753]: I0129 12:13:28.011518 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6v5zn\" (UniqueName: \"kubernetes.io/projected/4b6c93ff-5f67-421a-beca-c1b588d8535d-kube-api-access-6v5zn\") pod \"redhat-operators-5qfcb\" (UID: \"4b6c93ff-5f67-421a-beca-c1b588d8535d\") " pod="openshift-marketplace/redhat-operators-5qfcb" Jan 29 12:13:28 crc kubenswrapper[4753]: I0129 12:13:28.011674 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b6c93ff-5f67-421a-beca-c1b588d8535d-utilities\") pod \"redhat-operators-5qfcb\" (UID: \"4b6c93ff-5f67-421a-beca-c1b588d8535d\") " pod="openshift-marketplace/redhat-operators-5qfcb" Jan 29 12:13:28 crc kubenswrapper[4753]: I0129 12:13:28.114296 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b6c93ff-5f67-421a-beca-c1b588d8535d-catalog-content\") pod \"redhat-operators-5qfcb\" (UID: \"4b6c93ff-5f67-421a-beca-c1b588d8535d\") " pod="openshift-marketplace/redhat-operators-5qfcb" Jan 29 12:13:28 crc kubenswrapper[4753]: I0129 12:13:28.114348 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6v5zn\" (UniqueName: \"kubernetes.io/projected/4b6c93ff-5f67-421a-beca-c1b588d8535d-kube-api-access-6v5zn\") pod \"redhat-operators-5qfcb\" (UID: \"4b6c93ff-5f67-421a-beca-c1b588d8535d\") " pod="openshift-marketplace/redhat-operators-5qfcb" Jan 29 12:13:28 crc 
kubenswrapper[4753]: I0129 12:13:28.114378 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b6c93ff-5f67-421a-beca-c1b588d8535d-utilities\") pod \"redhat-operators-5qfcb\" (UID: \"4b6c93ff-5f67-421a-beca-c1b588d8535d\") " pod="openshift-marketplace/redhat-operators-5qfcb"
Jan 29 12:13:28 crc kubenswrapper[4753]: I0129 12:13:28.114994 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b6c93ff-5f67-421a-beca-c1b588d8535d-utilities\") pod \"redhat-operators-5qfcb\" (UID: \"4b6c93ff-5f67-421a-beca-c1b588d8535d\") " pod="openshift-marketplace/redhat-operators-5qfcb"
Jan 29 12:13:28 crc kubenswrapper[4753]: I0129 12:13:28.115203 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b6c93ff-5f67-421a-beca-c1b588d8535d-catalog-content\") pod \"redhat-operators-5qfcb\" (UID: \"4b6c93ff-5f67-421a-beca-c1b588d8535d\") " pod="openshift-marketplace/redhat-operators-5qfcb"
Jan 29 12:13:28 crc kubenswrapper[4753]: I0129 12:13:28.134784 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6v5zn\" (UniqueName: \"kubernetes.io/projected/4b6c93ff-5f67-421a-beca-c1b588d8535d-kube-api-access-6v5zn\") pod \"redhat-operators-5qfcb\" (UID: \"4b6c93ff-5f67-421a-beca-c1b588d8535d\") " pod="openshift-marketplace/redhat-operators-5qfcb"
Jan 29 12:13:28 crc kubenswrapper[4753]: I0129 12:13:28.184896 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-52zgz" event={"ID":"ea63566b-9a3b-4a69-aaba-18e93b24f5c3","Type":"ContainerStarted","Data":"564a096c69eadd130f5d07c56d04cbe13f12ea769eea1fb454316368080aee5f"}
Jan 29 12:13:28 crc kubenswrapper[4753]: I0129 12:13:28.189849 4753 generic.go:334] "Generic (PLEG): container finished" podID="30b357a9-c353-4290-92a0-cbec35e161a0" containerID="af61beebbaa3e1a915c90a91188d410f3d331191cf979d68edc0bea6f6b882e1" exitCode=0
Jan 29 12:13:28 crc kubenswrapper[4753]: I0129 12:13:28.189957 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-892wz" event={"ID":"30b357a9-c353-4290-92a0-cbec35e161a0","Type":"ContainerDied","Data":"af61beebbaa3e1a915c90a91188d410f3d331191cf979d68edc0bea6f6b882e1"}
Jan 29 12:13:28 crc kubenswrapper[4753]: I0129 12:13:28.190033 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-892wz" event={"ID":"30b357a9-c353-4290-92a0-cbec35e161a0","Type":"ContainerStarted","Data":"143a72ea219efd88967551e67561825f23760cb1a53a642313f2b21d18c7fc2d"}
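The utilities and catalog-content volumes being attached and mounted for each catalog pod above are pod-scoped emptyDir volumes, while kube-api-access-* is the kubelet-generated projected service-account token volume. A sketch of that volume set using the k8s.io/api/core/v1 types (assuming that module is available; this is an illustration, not the actual manifest):

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
    )

    func main() {
        vols := []corev1.Volume{
            // Scratch space shared between the extract steps and the server.
            {Name: "utilities", VolumeSource: corev1.VolumeSource{
                EmptyDir: &corev1.EmptyDirVolumeSource{}}},
            {Name: "catalog-content", VolumeSource: corev1.VolumeSource{
                EmptyDir: &corev1.EmptyDirVolumeSource{}}},
            // kube-api-access-* is injected by the kubelet as a projected
            // volume (token + CA bundle + namespace); omitted from this sketch.
        }
        fmt.Println(len(vols), "declared volumes")
    }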
Need to start a new one" pod="openshift-marketplace/redhat-operators-5qfcb" Jan 29 12:13:28 crc kubenswrapper[4753]: I0129 12:13:28.695035 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5qfcb"] Jan 29 12:13:28 crc kubenswrapper[4753]: W0129 12:13:28.707490 4753 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4b6c93ff_5f67_421a_beca_c1b588d8535d.slice/crio-95c60fe02e6a43e147563a8fe74ff2293180b926771986e8c6b426fe1141c431 WatchSource:0}: Error finding container 95c60fe02e6a43e147563a8fe74ff2293180b926771986e8c6b426fe1141c431: Status 404 returned error can't find the container with id 95c60fe02e6a43e147563a8fe74ff2293180b926771986e8c6b426fe1141c431 Jan 29 12:13:29 crc kubenswrapper[4753]: I0129 12:13:29.331209 4753 patch_prober.go:28] interesting pod/machine-config-daemon-7c24x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 12:13:29 crc kubenswrapper[4753]: I0129 12:13:29.331591 4753 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 12:13:29 crc kubenswrapper[4753]: I0129 12:13:29.357479 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-zscj5"] Jan 29 12:13:29 crc kubenswrapper[4753]: I0129 12:13:29.359112 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-zscj5" Jan 29 12:13:29 crc kubenswrapper[4753]: I0129 12:13:29.360267 4753 generic.go:334] "Generic (PLEG): container finished" podID="4b6c93ff-5f67-421a-beca-c1b588d8535d" containerID="47cfd943829c380e04ff53bb0ea1816f77ba68e79853a50b654af1f10f1d9179" exitCode=0 Jan 29 12:13:29 crc kubenswrapper[4753]: I0129 12:13:29.360362 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5qfcb" event={"ID":"4b6c93ff-5f67-421a-beca-c1b588d8535d","Type":"ContainerDied","Data":"47cfd943829c380e04ff53bb0ea1816f77ba68e79853a50b654af1f10f1d9179"} Jan 29 12:13:29 crc kubenswrapper[4753]: I0129 12:13:29.360393 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5qfcb" event={"ID":"4b6c93ff-5f67-421a-beca-c1b588d8535d","Type":"ContainerStarted","Data":"95c60fe02e6a43e147563a8fe74ff2293180b926771986e8c6b426fe1141c431"} Jan 29 12:13:29 crc kubenswrapper[4753]: I0129 12:13:29.364897 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Jan 29 12:13:29 crc kubenswrapper[4753]: I0129 12:13:29.369865 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-zscj5"] Jan 29 12:13:29 crc kubenswrapper[4753]: I0129 12:13:29.373201 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-892wz" event={"ID":"30b357a9-c353-4290-92a0-cbec35e161a0","Type":"ContainerStarted","Data":"5c229795d1adc674b0c93950842ff3e9014381daa4e80f0a718c77cf702cf486"} Jan 29 12:13:29 crc kubenswrapper[4753]: I0129 12:13:29.380702 4753 generic.go:334] "Generic (PLEG): container finished" podID="ea63566b-9a3b-4a69-aaba-18e93b24f5c3" containerID="564a096c69eadd130f5d07c56d04cbe13f12ea769eea1fb454316368080aee5f" exitCode=0 Jan 29 12:13:29 crc kubenswrapper[4753]: I0129 12:13:29.381431 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-52zgz" event={"ID":"ea63566b-9a3b-4a69-aaba-18e93b24f5c3","Type":"ContainerDied","Data":"564a096c69eadd130f5d07c56d04cbe13f12ea769eea1fb454316368080aee5f"} Jan 29 12:13:29 crc kubenswrapper[4753]: I0129 12:13:29.381469 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-52zgz" event={"ID":"ea63566b-9a3b-4a69-aaba-18e93b24f5c3","Type":"ContainerStarted","Data":"405040ef0de54516dd5d468735021c300dcf22caef60c87891e6c86c90d07a4d"} Jan 29 12:13:29 crc kubenswrapper[4753]: I0129 12:13:29.460944 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-52zgz" podStartSLOduration=2.943169626 podStartE2EDuration="4.460914456s" podCreationTimestamp="2026-01-29 12:13:25 +0000 UTC" firstStartedPulling="2026-01-29 12:13:27.17766051 +0000 UTC m=+421.429741965" lastFinishedPulling="2026-01-29 12:13:28.69540534 +0000 UTC m=+422.947486795" observedRunningTime="2026-01-29 12:13:29.453398079 +0000 UTC m=+423.705479544" watchObservedRunningTime="2026-01-29 12:13:29.460914456 +0000 UTC m=+423.712995911" Jan 29 12:13:29 crc kubenswrapper[4753]: I0129 12:13:29.535768 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1008e065-fc17-492d-8775-ced31b31aa22-utilities\") pod \"community-operators-zscj5\" (UID: \"1008e065-fc17-492d-8775-ced31b31aa22\") " 
pod="openshift-marketplace/community-operators-zscj5" Jan 29 12:13:29 crc kubenswrapper[4753]: I0129 12:13:29.535913 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7hcn8\" (UniqueName: \"kubernetes.io/projected/1008e065-fc17-492d-8775-ced31b31aa22-kube-api-access-7hcn8\") pod \"community-operators-zscj5\" (UID: \"1008e065-fc17-492d-8775-ced31b31aa22\") " pod="openshift-marketplace/community-operators-zscj5" Jan 29 12:13:29 crc kubenswrapper[4753]: I0129 12:13:29.536132 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1008e065-fc17-492d-8775-ced31b31aa22-catalog-content\") pod \"community-operators-zscj5\" (UID: \"1008e065-fc17-492d-8775-ced31b31aa22\") " pod="openshift-marketplace/community-operators-zscj5" Jan 29 12:13:29 crc kubenswrapper[4753]: I0129 12:13:29.637648 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1008e065-fc17-492d-8775-ced31b31aa22-catalog-content\") pod \"community-operators-zscj5\" (UID: \"1008e065-fc17-492d-8775-ced31b31aa22\") " pod="openshift-marketplace/community-operators-zscj5" Jan 29 12:13:29 crc kubenswrapper[4753]: I0129 12:13:29.637768 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1008e065-fc17-492d-8775-ced31b31aa22-utilities\") pod \"community-operators-zscj5\" (UID: \"1008e065-fc17-492d-8775-ced31b31aa22\") " pod="openshift-marketplace/community-operators-zscj5" Jan 29 12:13:29 crc kubenswrapper[4753]: I0129 12:13:29.637816 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7hcn8\" (UniqueName: \"kubernetes.io/projected/1008e065-fc17-492d-8775-ced31b31aa22-kube-api-access-7hcn8\") pod \"community-operators-zscj5\" (UID: \"1008e065-fc17-492d-8775-ced31b31aa22\") " pod="openshift-marketplace/community-operators-zscj5" Jan 29 12:13:29 crc kubenswrapper[4753]: I0129 12:13:29.638397 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1008e065-fc17-492d-8775-ced31b31aa22-catalog-content\") pod \"community-operators-zscj5\" (UID: \"1008e065-fc17-492d-8775-ced31b31aa22\") " pod="openshift-marketplace/community-operators-zscj5" Jan 29 12:13:29 crc kubenswrapper[4753]: I0129 12:13:29.638571 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1008e065-fc17-492d-8775-ced31b31aa22-utilities\") pod \"community-operators-zscj5\" (UID: \"1008e065-fc17-492d-8775-ced31b31aa22\") " pod="openshift-marketplace/community-operators-zscj5" Jan 29 12:13:29 crc kubenswrapper[4753]: I0129 12:13:29.660131 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7hcn8\" (UniqueName: \"kubernetes.io/projected/1008e065-fc17-492d-8775-ced31b31aa22-kube-api-access-7hcn8\") pod \"community-operators-zscj5\" (UID: \"1008e065-fc17-492d-8775-ced31b31aa22\") " pod="openshift-marketplace/community-operators-zscj5" Jan 29 12:13:29 crc kubenswrapper[4753]: I0129 12:13:29.684462 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-zscj5" Jan 29 12:13:31 crc kubenswrapper[4753]: I0129 12:13:30.164663 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-zscj5"] Jan 29 12:13:31 crc kubenswrapper[4753]: W0129 12:13:30.193629 4753 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1008e065_fc17_492d_8775_ced31b31aa22.slice/crio-c002101263d55721b36a71425306a79cfeb4eb31965d6d3673d11f9342378a48 WatchSource:0}: Error finding container c002101263d55721b36a71425306a79cfeb4eb31965d6d3673d11f9342378a48: Status 404 returned error can't find the container with id c002101263d55721b36a71425306a79cfeb4eb31965d6d3673d11f9342378a48 Jan 29 12:13:31 crc kubenswrapper[4753]: I0129 12:13:30.474591 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zscj5" event={"ID":"1008e065-fc17-492d-8775-ced31b31aa22","Type":"ContainerStarted","Data":"c002101263d55721b36a71425306a79cfeb4eb31965d6d3673d11f9342378a48"} Jan 29 12:13:31 crc kubenswrapper[4753]: I0129 12:13:30.484982 4753 generic.go:334] "Generic (PLEG): container finished" podID="30b357a9-c353-4290-92a0-cbec35e161a0" containerID="5c229795d1adc674b0c93950842ff3e9014381daa4e80f0a718c77cf702cf486" exitCode=0 Jan 29 12:13:31 crc kubenswrapper[4753]: I0129 12:13:30.486478 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-892wz" event={"ID":"30b357a9-c353-4290-92a0-cbec35e161a0","Type":"ContainerDied","Data":"5c229795d1adc674b0c93950842ff3e9014381daa4e80f0a718c77cf702cf486"} Jan 29 12:13:31 crc kubenswrapper[4753]: I0129 12:13:31.497464 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5qfcb" event={"ID":"4b6c93ff-5f67-421a-beca-c1b588d8535d","Type":"ContainerStarted","Data":"afecfe1bedda13722f7bf22ccb84eb3edf343b8be345f528a52ccf2f2ca5ad62"} Jan 29 12:13:31 crc kubenswrapper[4753]: I0129 12:13:31.499425 4753 generic.go:334] "Generic (PLEG): container finished" podID="1008e065-fc17-492d-8775-ced31b31aa22" containerID="3e2c6e0901bf25906b0c09c11634031ca0c8a6bda62a04cd69abf0ebb00d71d0" exitCode=0 Jan 29 12:13:31 crc kubenswrapper[4753]: I0129 12:13:31.499538 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zscj5" event={"ID":"1008e065-fc17-492d-8775-ced31b31aa22","Type":"ContainerDied","Data":"3e2c6e0901bf25906b0c09c11634031ca0c8a6bda62a04cd69abf0ebb00d71d0"} Jan 29 12:13:31 crc kubenswrapper[4753]: I0129 12:13:31.502352 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-892wz" event={"ID":"30b357a9-c353-4290-92a0-cbec35e161a0","Type":"ContainerStarted","Data":"0aa8b52a90835142e4e37ab02a7fe2519204b70b171a51f115ba004eac76c9d3"} Jan 29 12:13:31 crc kubenswrapper[4753]: I0129 12:13:31.547549 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-892wz" podStartSLOduration=2.7422275799999998 podStartE2EDuration="5.547526837s" podCreationTimestamp="2026-01-29 12:13:26 +0000 UTC" firstStartedPulling="2026-01-29 12:13:28.193134326 +0000 UTC m=+422.445215781" lastFinishedPulling="2026-01-29 12:13:30.998433583 +0000 UTC m=+425.250515038" observedRunningTime="2026-01-29 12:13:31.545332411 +0000 UTC m=+425.797414046" watchObservedRunningTime="2026-01-29 12:13:31.547526837 +0000 UTC m=+425.799608292" 
Jan 29 12:13:32 crc kubenswrapper[4753]: I0129 12:13:32.520868 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zscj5" event={"ID":"1008e065-fc17-492d-8775-ced31b31aa22","Type":"ContainerStarted","Data":"a839c8015ff4cfd3a6ee0fcefa41677e6ad0dc35020c579044f785d125ac71bc"} Jan 29 12:13:32 crc kubenswrapper[4753]: I0129 12:13:32.532982 4753 generic.go:334] "Generic (PLEG): container finished" podID="4b6c93ff-5f67-421a-beca-c1b588d8535d" containerID="afecfe1bedda13722f7bf22ccb84eb3edf343b8be345f528a52ccf2f2ca5ad62" exitCode=0 Jan 29 12:13:32 crc kubenswrapper[4753]: I0129 12:13:32.533197 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5qfcb" event={"ID":"4b6c93ff-5f67-421a-beca-c1b588d8535d","Type":"ContainerDied","Data":"afecfe1bedda13722f7bf22ccb84eb3edf343b8be345f528a52ccf2f2ca5ad62"} Jan 29 12:13:33 crc kubenswrapper[4753]: I0129 12:13:33.550896 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5qfcb" event={"ID":"4b6c93ff-5f67-421a-beca-c1b588d8535d","Type":"ContainerStarted","Data":"aef6124be601a3f3b3a1b45a7d7b413fd0db8c3094e348495399b9d833b8f641"} Jan 29 12:13:33 crc kubenswrapper[4753]: I0129 12:13:33.554579 4753 generic.go:334] "Generic (PLEG): container finished" podID="1008e065-fc17-492d-8775-ced31b31aa22" containerID="a839c8015ff4cfd3a6ee0fcefa41677e6ad0dc35020c579044f785d125ac71bc" exitCode=0 Jan 29 12:13:33 crc kubenswrapper[4753]: I0129 12:13:33.554636 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zscj5" event={"ID":"1008e065-fc17-492d-8775-ced31b31aa22","Type":"ContainerDied","Data":"a839c8015ff4cfd3a6ee0fcefa41677e6ad0dc35020c579044f785d125ac71bc"} Jan 29 12:13:34 crc kubenswrapper[4753]: I0129 12:13:34.566078 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zscj5" event={"ID":"1008e065-fc17-492d-8775-ced31b31aa22","Type":"ContainerStarted","Data":"f35b141acc5d01546b32d8beaab88e4845f8225bb6e415ab91f91b0d1094f042"} Jan 29 12:13:34 crc kubenswrapper[4753]: I0129 12:13:34.595950 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-5qfcb" podStartSLOduration=3.88014127 podStartE2EDuration="7.595910779s" podCreationTimestamp="2026-01-29 12:13:27 +0000 UTC" firstStartedPulling="2026-01-29 12:13:29.365064588 +0000 UTC m=+423.617146043" lastFinishedPulling="2026-01-29 12:13:33.080834097 +0000 UTC m=+427.332915552" observedRunningTime="2026-01-29 12:13:34.591037042 +0000 UTC m=+428.843118497" watchObservedRunningTime="2026-01-29 12:13:34.595910779 +0000 UTC m=+428.847992234" Jan 29 12:13:34 crc kubenswrapper[4753]: I0129 12:13:34.630815 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-zscj5" podStartSLOduration=2.9685454829999998 podStartE2EDuration="5.630774759s" podCreationTimestamp="2026-01-29 12:13:29 +0000 UTC" firstStartedPulling="2026-01-29 12:13:31.501698826 +0000 UTC m=+425.753780281" lastFinishedPulling="2026-01-29 12:13:34.163928102 +0000 UTC m=+428.416009557" observedRunningTime="2026-01-29 12:13:34.629325635 +0000 UTC m=+428.881407100" watchObservedRunningTime="2026-01-29 12:13:34.630774759 +0000 UTC m=+428.882856214" Jan 29 12:13:36 crc kubenswrapper[4753]: I0129 12:13:36.318247 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-marketplace/redhat-marketplace-52zgz" Jan 29 12:13:36 crc kubenswrapper[4753]: I0129 12:13:36.318707 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-52zgz" Jan 29 12:13:36 crc kubenswrapper[4753]: I0129 12:13:36.381147 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-52zgz" Jan 29 12:13:36 crc kubenswrapper[4753]: I0129 12:13:36.629549 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-52zgz" Jan 29 12:13:37 crc kubenswrapper[4753]: I0129 12:13:37.558716 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-892wz" Jan 29 12:13:37 crc kubenswrapper[4753]: I0129 12:13:37.558782 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-892wz" Jan 29 12:13:37 crc kubenswrapper[4753]: I0129 12:13:37.653281 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-892wz" Jan 29 12:13:37 crc kubenswrapper[4753]: I0129 12:13:37.704590 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-892wz" Jan 29 12:13:38 crc kubenswrapper[4753]: I0129 12:13:38.304946 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-5qfcb" Jan 29 12:13:38 crc kubenswrapper[4753]: I0129 12:13:38.306940 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-5qfcb" Jan 29 12:13:39 crc kubenswrapper[4753]: I0129 12:13:39.356018 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-5qfcb" podUID="4b6c93ff-5f67-421a-beca-c1b588d8535d" containerName="registry-server" probeResult="failure" output=< Jan 29 12:13:39 crc kubenswrapper[4753]: timeout: failed to connect service ":50051" within 1s Jan 29 12:13:39 crc kubenswrapper[4753]: > Jan 29 12:13:39 crc kubenswrapper[4753]: I0129 12:13:39.684945 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-zscj5" Jan 29 12:13:39 crc kubenswrapper[4753]: I0129 12:13:39.685153 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-zscj5" Jan 29 12:13:39 crc kubenswrapper[4753]: I0129 12:13:39.737590 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-zscj5" Jan 29 12:13:40 crc kubenswrapper[4753]: I0129 12:13:40.911882 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-zscj5" Jan 29 12:13:48 crc kubenswrapper[4753]: I0129 12:13:48.322854 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-5qfcb" Jan 29 12:13:48 crc kubenswrapper[4753]: I0129 12:13:48.371836 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-5qfcb" Jan 29 12:13:59 crc kubenswrapper[4753]: I0129 12:13:59.253150 4753 patch_prober.go:28] interesting pod/machine-config-daemon-7c24x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure 
output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 12:13:59 crc kubenswrapper[4753]: I0129 12:13:59.253631 4753 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 12:13:59 crc kubenswrapper[4753]: I0129 12:13:59.253679 4753 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" Jan 29 12:13:59 crc kubenswrapper[4753]: I0129 12:13:59.254256 4753 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"427d33b9cdf4ec7ff547d57d07b232e8fe7e1dc88d7955ba9130fef447076573"} pod="openshift-machine-config-operator/machine-config-daemon-7c24x" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 12:13:59 crc kubenswrapper[4753]: I0129 12:13:59.254316 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" containerName="machine-config-daemon" containerID="cri-o://427d33b9cdf4ec7ff547d57d07b232e8fe7e1dc88d7955ba9130fef447076573" gracePeriod=600 Jan 29 12:13:59 crc kubenswrapper[4753]: I0129 12:13:59.640287 4753 generic.go:334] "Generic (PLEG): container finished" podID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" containerID="427d33b9cdf4ec7ff547d57d07b232e8fe7e1dc88d7955ba9130fef447076573" exitCode=0 Jan 29 12:13:59 crc kubenswrapper[4753]: I0129 12:13:59.640386 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" event={"ID":"b0310995-a7c7-47c3-ae6c-05daaaba92a6","Type":"ContainerDied","Data":"427d33b9cdf4ec7ff547d57d07b232e8fe7e1dc88d7955ba9130fef447076573"} Jan 29 12:13:59 crc kubenswrapper[4753]: I0129 12:13:59.640638 4753 scope.go:117] "RemoveContainer" containerID="8391a68625fc3d8fdbc4f16b0b7b25e359798d08ce65ad8f482722910873418a" Jan 29 12:14:00 crc kubenswrapper[4753]: I0129 12:14:00.648205 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" event={"ID":"b0310995-a7c7-47c3-ae6c-05daaaba92a6","Type":"ContainerStarted","Data":"1e0c2015443e80d2d02eec0a9677231d797e4677debbb12d709c0bd3561be1e1"} Jan 29 12:15:00 crc kubenswrapper[4753]: I0129 12:15:00.222872 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494815-226f5"] Jan 29 12:15:00 crc kubenswrapper[4753]: I0129 12:15:00.224360 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494815-226f5" Jan 29 12:15:00 crc kubenswrapper[4753]: I0129 12:15:00.228007 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 29 12:15:00 crc kubenswrapper[4753]: I0129 12:15:00.228137 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 29 12:15:00 crc kubenswrapper[4753]: I0129 12:15:00.236535 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494815-226f5"] Jan 29 12:15:00 crc kubenswrapper[4753]: I0129 12:15:00.252992 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dhnzm\" (UniqueName: \"kubernetes.io/projected/bef72b2b-6797-43b5-b919-4fd91322b120-kube-api-access-dhnzm\") pod \"collect-profiles-29494815-226f5\" (UID: \"bef72b2b-6797-43b5-b919-4fd91322b120\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494815-226f5" Jan 29 12:15:00 crc kubenswrapper[4753]: I0129 12:15:00.253174 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/bef72b2b-6797-43b5-b919-4fd91322b120-secret-volume\") pod \"collect-profiles-29494815-226f5\" (UID: \"bef72b2b-6797-43b5-b919-4fd91322b120\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494815-226f5" Jan 29 12:15:00 crc kubenswrapper[4753]: I0129 12:15:00.253218 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/bef72b2b-6797-43b5-b919-4fd91322b120-config-volume\") pod \"collect-profiles-29494815-226f5\" (UID: \"bef72b2b-6797-43b5-b919-4fd91322b120\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494815-226f5" Jan 29 12:15:00 crc kubenswrapper[4753]: I0129 12:15:00.355092 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dhnzm\" (UniqueName: \"kubernetes.io/projected/bef72b2b-6797-43b5-b919-4fd91322b120-kube-api-access-dhnzm\") pod \"collect-profiles-29494815-226f5\" (UID: \"bef72b2b-6797-43b5-b919-4fd91322b120\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494815-226f5" Jan 29 12:15:00 crc kubenswrapper[4753]: I0129 12:15:00.355304 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/bef72b2b-6797-43b5-b919-4fd91322b120-secret-volume\") pod \"collect-profiles-29494815-226f5\" (UID: \"bef72b2b-6797-43b5-b919-4fd91322b120\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494815-226f5" Jan 29 12:15:00 crc kubenswrapper[4753]: I0129 12:15:00.355333 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/bef72b2b-6797-43b5-b919-4fd91322b120-config-volume\") pod \"collect-profiles-29494815-226f5\" (UID: \"bef72b2b-6797-43b5-b919-4fd91322b120\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494815-226f5" Jan 29 12:15:00 crc kubenswrapper[4753]: I0129 12:15:00.356396 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/bef72b2b-6797-43b5-b919-4fd91322b120-config-volume\") pod 
\"collect-profiles-29494815-226f5\" (UID: \"bef72b2b-6797-43b5-b919-4fd91322b120\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494815-226f5" Jan 29 12:15:00 crc kubenswrapper[4753]: I0129 12:15:00.363643 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/bef72b2b-6797-43b5-b919-4fd91322b120-secret-volume\") pod \"collect-profiles-29494815-226f5\" (UID: \"bef72b2b-6797-43b5-b919-4fd91322b120\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494815-226f5" Jan 29 12:15:00 crc kubenswrapper[4753]: I0129 12:15:00.382630 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dhnzm\" (UniqueName: \"kubernetes.io/projected/bef72b2b-6797-43b5-b919-4fd91322b120-kube-api-access-dhnzm\") pod \"collect-profiles-29494815-226f5\" (UID: \"bef72b2b-6797-43b5-b919-4fd91322b120\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494815-226f5" Jan 29 12:15:00 crc kubenswrapper[4753]: I0129 12:15:00.551461 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494815-226f5" Jan 29 12:15:01 crc kubenswrapper[4753]: I0129 12:15:01.169125 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494815-226f5"] Jan 29 12:15:01 crc kubenswrapper[4753]: I0129 12:15:01.539182 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494815-226f5" event={"ID":"bef72b2b-6797-43b5-b919-4fd91322b120","Type":"ContainerStarted","Data":"8c4487654a7db7aa89abd6a54d4cd87c9f9da6fe70f5e6b383bbb185946d14f5"} Jan 29 12:15:01 crc kubenswrapper[4753]: I0129 12:15:01.540077 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494815-226f5" event={"ID":"bef72b2b-6797-43b5-b919-4fd91322b120","Type":"ContainerStarted","Data":"5ef37fa38e8d12658cd646c70b9d22066753dff7be5105bec91f5eb31b161dac"} Jan 29 12:15:01 crc kubenswrapper[4753]: I0129 12:15:01.558767 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29494815-226f5" podStartSLOduration=1.5587444289999999 podStartE2EDuration="1.558744429s" podCreationTimestamp="2026-01-29 12:15:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:15:01.555970976 +0000 UTC m=+515.808052431" watchObservedRunningTime="2026-01-29 12:15:01.558744429 +0000 UTC m=+515.810825884" Jan 29 12:15:02 crc kubenswrapper[4753]: I0129 12:15:02.551950 4753 generic.go:334] "Generic (PLEG): container finished" podID="bef72b2b-6797-43b5-b919-4fd91322b120" containerID="8c4487654a7db7aa89abd6a54d4cd87c9f9da6fe70f5e6b383bbb185946d14f5" exitCode=0 Jan 29 12:15:02 crc kubenswrapper[4753]: I0129 12:15:02.552165 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494815-226f5" event={"ID":"bef72b2b-6797-43b5-b919-4fd91322b120","Type":"ContainerDied","Data":"8c4487654a7db7aa89abd6a54d4cd87c9f9da6fe70f5e6b383bbb185946d14f5"} Jan 29 12:15:03 crc kubenswrapper[4753]: I0129 12:15:03.831166 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494815-226f5" Jan 29 12:15:03 crc kubenswrapper[4753]: I0129 12:15:03.858410 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/bef72b2b-6797-43b5-b919-4fd91322b120-secret-volume\") pod \"bef72b2b-6797-43b5-b919-4fd91322b120\" (UID: \"bef72b2b-6797-43b5-b919-4fd91322b120\") " Jan 29 12:15:03 crc kubenswrapper[4753]: I0129 12:15:03.858535 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dhnzm\" (UniqueName: \"kubernetes.io/projected/bef72b2b-6797-43b5-b919-4fd91322b120-kube-api-access-dhnzm\") pod \"bef72b2b-6797-43b5-b919-4fd91322b120\" (UID: \"bef72b2b-6797-43b5-b919-4fd91322b120\") " Jan 29 12:15:03 crc kubenswrapper[4753]: I0129 12:15:03.858594 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/bef72b2b-6797-43b5-b919-4fd91322b120-config-volume\") pod \"bef72b2b-6797-43b5-b919-4fd91322b120\" (UID: \"bef72b2b-6797-43b5-b919-4fd91322b120\") " Jan 29 12:15:03 crc kubenswrapper[4753]: I0129 12:15:03.860709 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bef72b2b-6797-43b5-b919-4fd91322b120-config-volume" (OuterVolumeSpecName: "config-volume") pod "bef72b2b-6797-43b5-b919-4fd91322b120" (UID: "bef72b2b-6797-43b5-b919-4fd91322b120"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:15:03 crc kubenswrapper[4753]: I0129 12:15:03.864991 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bef72b2b-6797-43b5-b919-4fd91322b120-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "bef72b2b-6797-43b5-b919-4fd91322b120" (UID: "bef72b2b-6797-43b5-b919-4fd91322b120"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:15:03 crc kubenswrapper[4753]: I0129 12:15:03.865070 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bef72b2b-6797-43b5-b919-4fd91322b120-kube-api-access-dhnzm" (OuterVolumeSpecName: "kube-api-access-dhnzm") pod "bef72b2b-6797-43b5-b919-4fd91322b120" (UID: "bef72b2b-6797-43b5-b919-4fd91322b120"). InnerVolumeSpecName "kube-api-access-dhnzm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:15:03 crc kubenswrapper[4753]: I0129 12:15:03.960180 4753 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/bef72b2b-6797-43b5-b919-4fd91322b120-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 29 12:15:03 crc kubenswrapper[4753]: I0129 12:15:03.960273 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dhnzm\" (UniqueName: \"kubernetes.io/projected/bef72b2b-6797-43b5-b919-4fd91322b120-kube-api-access-dhnzm\") on node \"crc\" DevicePath \"\"" Jan 29 12:15:03 crc kubenswrapper[4753]: I0129 12:15:03.960291 4753 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/bef72b2b-6797-43b5-b919-4fd91322b120-config-volume\") on node \"crc\" DevicePath \"\"" Jan 29 12:15:04 crc kubenswrapper[4753]: I0129 12:15:04.571240 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494815-226f5" event={"ID":"bef72b2b-6797-43b5-b919-4fd91322b120","Type":"ContainerDied","Data":"5ef37fa38e8d12658cd646c70b9d22066753dff7be5105bec91f5eb31b161dac"} Jan 29 12:15:04 crc kubenswrapper[4753]: I0129 12:15:04.571648 4753 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5ef37fa38e8d12658cd646c70b9d22066753dff7be5105bec91f5eb31b161dac" Jan 29 12:15:04 crc kubenswrapper[4753]: I0129 12:15:04.571331 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494815-226f5" Jan 29 12:15:39 crc kubenswrapper[4753]: I0129 12:15:39.313484 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-m8764"] Jan 29 12:15:39 crc kubenswrapper[4753]: E0129 12:15:39.314786 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bef72b2b-6797-43b5-b919-4fd91322b120" containerName="collect-profiles" Jan 29 12:15:39 crc kubenswrapper[4753]: I0129 12:15:39.314807 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="bef72b2b-6797-43b5-b919-4fd91322b120" containerName="collect-profiles" Jan 29 12:15:39 crc kubenswrapper[4753]: I0129 12:15:39.314963 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="bef72b2b-6797-43b5-b919-4fd91322b120" containerName="collect-profiles" Jan 29 12:15:39 crc kubenswrapper[4753]: I0129 12:15:39.315693 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-m8764" Jan 29 12:15:39 crc kubenswrapper[4753]: I0129 12:15:39.335099 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-m8764"] Jan 29 12:15:39 crc kubenswrapper[4753]: I0129 12:15:39.352107 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/64129e0d-3ae3-4ebd-b39a-9e0d4271d847-registry-certificates\") pod \"image-registry-66df7c8f76-m8764\" (UID: \"64129e0d-3ae3-4ebd-b39a-9e0d4271d847\") " pod="openshift-image-registry/image-registry-66df7c8f76-m8764" Jan 29 12:15:39 crc kubenswrapper[4753]: I0129 12:15:39.352171 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/64129e0d-3ae3-4ebd-b39a-9e0d4271d847-ca-trust-extracted\") pod \"image-registry-66df7c8f76-m8764\" (UID: \"64129e0d-3ae3-4ebd-b39a-9e0d4271d847\") " pod="openshift-image-registry/image-registry-66df7c8f76-m8764" Jan 29 12:15:39 crc kubenswrapper[4753]: I0129 12:15:39.352303 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/64129e0d-3ae3-4ebd-b39a-9e0d4271d847-trusted-ca\") pod \"image-registry-66df7c8f76-m8764\" (UID: \"64129e0d-3ae3-4ebd-b39a-9e0d4271d847\") " pod="openshift-image-registry/image-registry-66df7c8f76-m8764" Jan 29 12:15:39 crc kubenswrapper[4753]: I0129 12:15:39.352345 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vh7h2\" (UniqueName: \"kubernetes.io/projected/64129e0d-3ae3-4ebd-b39a-9e0d4271d847-kube-api-access-vh7h2\") pod \"image-registry-66df7c8f76-m8764\" (UID: \"64129e0d-3ae3-4ebd-b39a-9e0d4271d847\") " pod="openshift-image-registry/image-registry-66df7c8f76-m8764" Jan 29 12:15:39 crc kubenswrapper[4753]: I0129 12:15:39.352389 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/64129e0d-3ae3-4ebd-b39a-9e0d4271d847-registry-tls\") pod \"image-registry-66df7c8f76-m8764\" (UID: \"64129e0d-3ae3-4ebd-b39a-9e0d4271d847\") " pod="openshift-image-registry/image-registry-66df7c8f76-m8764" Jan 29 12:15:39 crc kubenswrapper[4753]: I0129 12:15:39.352417 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/64129e0d-3ae3-4ebd-b39a-9e0d4271d847-installation-pull-secrets\") pod \"image-registry-66df7c8f76-m8764\" (UID: \"64129e0d-3ae3-4ebd-b39a-9e0d4271d847\") " pod="openshift-image-registry/image-registry-66df7c8f76-m8764" Jan 29 12:15:39 crc kubenswrapper[4753]: I0129 12:15:39.352437 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/64129e0d-3ae3-4ebd-b39a-9e0d4271d847-bound-sa-token\") pod \"image-registry-66df7c8f76-m8764\" (UID: \"64129e0d-3ae3-4ebd-b39a-9e0d4271d847\") " pod="openshift-image-registry/image-registry-66df7c8f76-m8764" Jan 29 12:15:39 crc kubenswrapper[4753]: I0129 12:15:39.352565 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-m8764\" (UID: \"64129e0d-3ae3-4ebd-b39a-9e0d4271d847\") " pod="openshift-image-registry/image-registry-66df7c8f76-m8764" Jan 29 12:15:39 crc kubenswrapper[4753]: I0129 12:15:39.453877 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/64129e0d-3ae3-4ebd-b39a-9e0d4271d847-registry-tls\") pod \"image-registry-66df7c8f76-m8764\" (UID: \"64129e0d-3ae3-4ebd-b39a-9e0d4271d847\") " pod="openshift-image-registry/image-registry-66df7c8f76-m8764" Jan 29 12:15:39 crc kubenswrapper[4753]: I0129 12:15:39.453956 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/64129e0d-3ae3-4ebd-b39a-9e0d4271d847-installation-pull-secrets\") pod \"image-registry-66df7c8f76-m8764\" (UID: \"64129e0d-3ae3-4ebd-b39a-9e0d4271d847\") " pod="openshift-image-registry/image-registry-66df7c8f76-m8764" Jan 29 12:15:39 crc kubenswrapper[4753]: I0129 12:15:39.453980 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/64129e0d-3ae3-4ebd-b39a-9e0d4271d847-bound-sa-token\") pod \"image-registry-66df7c8f76-m8764\" (UID: \"64129e0d-3ae3-4ebd-b39a-9e0d4271d847\") " pod="openshift-image-registry/image-registry-66df7c8f76-m8764" Jan 29 12:15:39 crc kubenswrapper[4753]: I0129 12:15:39.454040 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/64129e0d-3ae3-4ebd-b39a-9e0d4271d847-registry-certificates\") pod \"image-registry-66df7c8f76-m8764\" (UID: \"64129e0d-3ae3-4ebd-b39a-9e0d4271d847\") " pod="openshift-image-registry/image-registry-66df7c8f76-m8764" Jan 29 12:15:39 crc kubenswrapper[4753]: I0129 12:15:39.454059 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/64129e0d-3ae3-4ebd-b39a-9e0d4271d847-ca-trust-extracted\") pod \"image-registry-66df7c8f76-m8764\" (UID: \"64129e0d-3ae3-4ebd-b39a-9e0d4271d847\") " pod="openshift-image-registry/image-registry-66df7c8f76-m8764" Jan 29 12:15:39 crc kubenswrapper[4753]: I0129 12:15:39.454082 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/64129e0d-3ae3-4ebd-b39a-9e0d4271d847-trusted-ca\") pod \"image-registry-66df7c8f76-m8764\" (UID: \"64129e0d-3ae3-4ebd-b39a-9e0d4271d847\") " pod="openshift-image-registry/image-registry-66df7c8f76-m8764" Jan 29 12:15:39 crc kubenswrapper[4753]: I0129 12:15:39.454109 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vh7h2\" (UniqueName: \"kubernetes.io/projected/64129e0d-3ae3-4ebd-b39a-9e0d4271d847-kube-api-access-vh7h2\") pod \"image-registry-66df7c8f76-m8764\" (UID: \"64129e0d-3ae3-4ebd-b39a-9e0d4271d847\") " pod="openshift-image-registry/image-registry-66df7c8f76-m8764" Jan 29 12:15:39 crc kubenswrapper[4753]: I0129 12:15:39.455699 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/64129e0d-3ae3-4ebd-b39a-9e0d4271d847-ca-trust-extracted\") pod \"image-registry-66df7c8f76-m8764\" (UID: \"64129e0d-3ae3-4ebd-b39a-9e0d4271d847\") " 
pod="openshift-image-registry/image-registry-66df7c8f76-m8764" Jan 29 12:15:39 crc kubenswrapper[4753]: I0129 12:15:39.457115 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/64129e0d-3ae3-4ebd-b39a-9e0d4271d847-trusted-ca\") pod \"image-registry-66df7c8f76-m8764\" (UID: \"64129e0d-3ae3-4ebd-b39a-9e0d4271d847\") " pod="openshift-image-registry/image-registry-66df7c8f76-m8764" Jan 29 12:15:39 crc kubenswrapper[4753]: I0129 12:15:39.458288 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/64129e0d-3ae3-4ebd-b39a-9e0d4271d847-registry-certificates\") pod \"image-registry-66df7c8f76-m8764\" (UID: \"64129e0d-3ae3-4ebd-b39a-9e0d4271d847\") " pod="openshift-image-registry/image-registry-66df7c8f76-m8764" Jan 29 12:15:39 crc kubenswrapper[4753]: I0129 12:15:39.461917 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/64129e0d-3ae3-4ebd-b39a-9e0d4271d847-installation-pull-secrets\") pod \"image-registry-66df7c8f76-m8764\" (UID: \"64129e0d-3ae3-4ebd-b39a-9e0d4271d847\") " pod="openshift-image-registry/image-registry-66df7c8f76-m8764" Jan 29 12:15:39 crc kubenswrapper[4753]: I0129 12:15:39.463732 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/64129e0d-3ae3-4ebd-b39a-9e0d4271d847-registry-tls\") pod \"image-registry-66df7c8f76-m8764\" (UID: \"64129e0d-3ae3-4ebd-b39a-9e0d4271d847\") " pod="openshift-image-registry/image-registry-66df7c8f76-m8764" Jan 29 12:15:39 crc kubenswrapper[4753]: I0129 12:15:39.467456 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-m8764\" (UID: \"64129e0d-3ae3-4ebd-b39a-9e0d4271d847\") " pod="openshift-image-registry/image-registry-66df7c8f76-m8764" Jan 29 12:15:39 crc kubenswrapper[4753]: I0129 12:15:39.474126 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vh7h2\" (UniqueName: \"kubernetes.io/projected/64129e0d-3ae3-4ebd-b39a-9e0d4271d847-kube-api-access-vh7h2\") pod \"image-registry-66df7c8f76-m8764\" (UID: \"64129e0d-3ae3-4ebd-b39a-9e0d4271d847\") " pod="openshift-image-registry/image-registry-66df7c8f76-m8764" Jan 29 12:15:39 crc kubenswrapper[4753]: I0129 12:15:39.474362 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/64129e0d-3ae3-4ebd-b39a-9e0d4271d847-bound-sa-token\") pod \"image-registry-66df7c8f76-m8764\" (UID: \"64129e0d-3ae3-4ebd-b39a-9e0d4271d847\") " pod="openshift-image-registry/image-registry-66df7c8f76-m8764" Jan 29 12:15:39 crc kubenswrapper[4753]: I0129 12:15:39.639347 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-m8764" Jan 29 12:15:40 crc kubenswrapper[4753]: I0129 12:15:40.043178 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-m8764"] Jan 29 12:15:40 crc kubenswrapper[4753]: I0129 12:15:40.903293 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-m8764" event={"ID":"64129e0d-3ae3-4ebd-b39a-9e0d4271d847","Type":"ContainerStarted","Data":"7377272a73bb2a4c328d1bc047ff9be246abb86420c74470081908e0bbc8ae70"} Jan 29 12:15:40 crc kubenswrapper[4753]: I0129 12:15:40.903356 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-m8764" event={"ID":"64129e0d-3ae3-4ebd-b39a-9e0d4271d847","Type":"ContainerStarted","Data":"63b667adef4fba8d3d3abc836f7c4ca1f04f6c24e4f6eb041eefb1f17e3068ed"} Jan 29 12:15:40 crc kubenswrapper[4753]: I0129 12:15:40.903466 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-m8764" Jan 29 12:15:40 crc kubenswrapper[4753]: I0129 12:15:40.926212 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-m8764" podStartSLOduration=1.926187163 podStartE2EDuration="1.926187163s" podCreationTimestamp="2026-01-29 12:15:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:15:40.922439145 +0000 UTC m=+555.174520600" watchObservedRunningTime="2026-01-29 12:15:40.926187163 +0000 UTC m=+555.178268618" Jan 29 12:15:59 crc kubenswrapper[4753]: I0129 12:15:59.647550 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-m8764" Jan 29 12:15:59 crc kubenswrapper[4753]: I0129 12:15:59.727854 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-w6vx6"] Jan 29 12:16:24 crc kubenswrapper[4753]: I0129 12:16:24.766366 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" podUID="df61f830-b312-4a01-8d17-057799312936" containerName="registry" containerID="cri-o://003f507526dc99f895161873c068f031ad3529cf912847135409593600a77d55" gracePeriod=30 Jan 29 12:16:25 crc kubenswrapper[4753]: I0129 12:16:25.149800 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" Jan 29 12:16:25 crc kubenswrapper[4753]: I0129 12:16:25.281637 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/df61f830-b312-4a01-8d17-057799312936-registry-tls\") pod \"df61f830-b312-4a01-8d17-057799312936\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " Jan 29 12:16:25 crc kubenswrapper[4753]: I0129 12:16:25.282039 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/df61f830-b312-4a01-8d17-057799312936-bound-sa-token\") pod \"df61f830-b312-4a01-8d17-057799312936\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " Jan 29 12:16:25 crc kubenswrapper[4753]: I0129 12:16:25.282317 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"df61f830-b312-4a01-8d17-057799312936\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " Jan 29 12:16:25 crc kubenswrapper[4753]: I0129 12:16:25.283353 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/df61f830-b312-4a01-8d17-057799312936-installation-pull-secrets\") pod \"df61f830-b312-4a01-8d17-057799312936\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " Jan 29 12:16:25 crc kubenswrapper[4753]: I0129 12:16:25.283408 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/df61f830-b312-4a01-8d17-057799312936-ca-trust-extracted\") pod \"df61f830-b312-4a01-8d17-057799312936\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " Jan 29 12:16:25 crc kubenswrapper[4753]: I0129 12:16:25.283438 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-766r8\" (UniqueName: \"kubernetes.io/projected/df61f830-b312-4a01-8d17-057799312936-kube-api-access-766r8\") pod \"df61f830-b312-4a01-8d17-057799312936\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " Jan 29 12:16:25 crc kubenswrapper[4753]: I0129 12:16:25.283472 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/df61f830-b312-4a01-8d17-057799312936-trusted-ca\") pod \"df61f830-b312-4a01-8d17-057799312936\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " Jan 29 12:16:25 crc kubenswrapper[4753]: I0129 12:16:25.284630 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/df61f830-b312-4a01-8d17-057799312936-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "df61f830-b312-4a01-8d17-057799312936" (UID: "df61f830-b312-4a01-8d17-057799312936"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:16:25 crc kubenswrapper[4753]: I0129 12:16:25.283513 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/df61f830-b312-4a01-8d17-057799312936-registry-certificates\") pod \"df61f830-b312-4a01-8d17-057799312936\" (UID: \"df61f830-b312-4a01-8d17-057799312936\") " Jan 29 12:16:25 crc kubenswrapper[4753]: I0129 12:16:25.285920 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/df61f830-b312-4a01-8d17-057799312936-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "df61f830-b312-4a01-8d17-057799312936" (UID: "df61f830-b312-4a01-8d17-057799312936"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:16:25 crc kubenswrapper[4753]: I0129 12:16:25.286769 4753 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/df61f830-b312-4a01-8d17-057799312936-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 29 12:16:25 crc kubenswrapper[4753]: I0129 12:16:25.293616 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/df61f830-b312-4a01-8d17-057799312936-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "df61f830-b312-4a01-8d17-057799312936" (UID: "df61f830-b312-4a01-8d17-057799312936"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:16:25 crc kubenswrapper[4753]: I0129 12:16:25.295689 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/df61f830-b312-4a01-8d17-057799312936-kube-api-access-766r8" (OuterVolumeSpecName: "kube-api-access-766r8") pod "df61f830-b312-4a01-8d17-057799312936" (UID: "df61f830-b312-4a01-8d17-057799312936"). InnerVolumeSpecName "kube-api-access-766r8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:16:25 crc kubenswrapper[4753]: I0129 12:16:25.303094 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/df61f830-b312-4a01-8d17-057799312936-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "df61f830-b312-4a01-8d17-057799312936" (UID: "df61f830-b312-4a01-8d17-057799312936"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:16:25 crc kubenswrapper[4753]: I0129 12:16:25.303629 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/df61f830-b312-4a01-8d17-057799312936-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "df61f830-b312-4a01-8d17-057799312936" (UID: "df61f830-b312-4a01-8d17-057799312936"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:16:25 crc kubenswrapper[4753]: I0129 12:16:25.315078 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/df61f830-b312-4a01-8d17-057799312936-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "df61f830-b312-4a01-8d17-057799312936" (UID: "df61f830-b312-4a01-8d17-057799312936"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:16:25 crc kubenswrapper[4753]: I0129 12:16:25.332367 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "df61f830-b312-4a01-8d17-057799312936" (UID: "df61f830-b312-4a01-8d17-057799312936"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 29 12:16:25 crc kubenswrapper[4753]: I0129 12:16:25.387871 4753 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/df61f830-b312-4a01-8d17-057799312936-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Jan 29 12:16:25 crc kubenswrapper[4753]: I0129 12:16:25.387932 4753 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/df61f830-b312-4a01-8d17-057799312936-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Jan 29 12:16:25 crc kubenswrapper[4753]: I0129 12:16:25.387945 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-766r8\" (UniqueName: \"kubernetes.io/projected/df61f830-b312-4a01-8d17-057799312936-kube-api-access-766r8\") on node \"crc\" DevicePath \"\"" Jan 29 12:16:25 crc kubenswrapper[4753]: I0129 12:16:25.387955 4753 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/df61f830-b312-4a01-8d17-057799312936-registry-certificates\") on node \"crc\" DevicePath \"\"" Jan 29 12:16:25 crc kubenswrapper[4753]: I0129 12:16:25.387970 4753 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/df61f830-b312-4a01-8d17-057799312936-registry-tls\") on node \"crc\" DevicePath \"\"" Jan 29 12:16:25 crc kubenswrapper[4753]: I0129 12:16:25.387979 4753 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/df61f830-b312-4a01-8d17-057799312936-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 29 12:16:25 crc kubenswrapper[4753]: I0129 12:16:25.821884 4753 generic.go:334] "Generic (PLEG): container finished" podID="df61f830-b312-4a01-8d17-057799312936" containerID="003f507526dc99f895161873c068f031ad3529cf912847135409593600a77d55" exitCode=0 Jan 29 12:16:25 crc kubenswrapper[4753]: I0129 12:16:25.822000 4753 util.go:48] "No ready sandbox for pod can be found. 
Jan 29 12:16:25 crc kubenswrapper[4753]: I0129 12:16:25.822618 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" event={"ID":"df61f830-b312-4a01-8d17-057799312936","Type":"ContainerDied","Data":"003f507526dc99f895161873c068f031ad3529cf912847135409593600a77d55"}
Jan 29 12:16:25 crc kubenswrapper[4753]: I0129 12:16:25.823174 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-w6vx6" event={"ID":"df61f830-b312-4a01-8d17-057799312936","Type":"ContainerDied","Data":"442a1d8a690b33f4f8314dec6144e50209b7db8e5bc4179bb526d947568780da"}
Jan 29 12:16:25 crc kubenswrapper[4753]: I0129 12:16:25.823306 4753 scope.go:117] "RemoveContainer" containerID="003f507526dc99f895161873c068f031ad3529cf912847135409593600a77d55"
Jan 29 12:16:25 crc kubenswrapper[4753]: I0129 12:16:25.848630 4753 scope.go:117] "RemoveContainer" containerID="003f507526dc99f895161873c068f031ad3529cf912847135409593600a77d55"
Jan 29 12:16:25 crc kubenswrapper[4753]: E0129 12:16:25.849705 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"003f507526dc99f895161873c068f031ad3529cf912847135409593600a77d55\": container with ID starting with 003f507526dc99f895161873c068f031ad3529cf912847135409593600a77d55 not found: ID does not exist" containerID="003f507526dc99f895161873c068f031ad3529cf912847135409593600a77d55"
Jan 29 12:16:25 crc kubenswrapper[4753]: I0129 12:16:25.849748 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"003f507526dc99f895161873c068f031ad3529cf912847135409593600a77d55"} err="failed to get container status \"003f507526dc99f895161873c068f031ad3529cf912847135409593600a77d55\": rpc error: code = NotFound desc = could not find container \"003f507526dc99f895161873c068f031ad3529cf912847135409593600a77d55\": container with ID starting with 003f507526dc99f895161873c068f031ad3529cf912847135409593600a77d55 not found: ID does not exist"
Jan 29 12:16:25 crc kubenswrapper[4753]: I0129 12:16:25.862927 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-w6vx6"]
Jan 29 12:16:25 crc kubenswrapper[4753]: I0129 12:16:25.866271 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-w6vx6"]
Jan 29 12:16:25 crc kubenswrapper[4753]: I0129 12:16:25.901831 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="df61f830-b312-4a01-8d17-057799312936" path="/var/lib/kubelet/pods/df61f830-b312-4a01-8d17-057799312936/volumes"
Jan 29 12:16:29 crc kubenswrapper[4753]: I0129 12:16:29.253536 4753 patch_prober.go:28] interesting pod/machine-config-daemon-7c24x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 12:16:29 crc kubenswrapper[4753]: I0129 12:16:29.253948 4753 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 12:16:59 crc kubenswrapper[4753]: I0129 12:16:59.253285 4753 patch_prober.go:28] interesting pod/machine-config-daemon-7c24x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
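[Editor's note] The "ContainerStatus from runtime service failed ... NotFound" error above is a benign race: a second RemoveContainer ran after the container was already gone, and the CRI runtime answered with a gRPC NotFound status, which the kubelet logs and moves past. A sketch of how such an error is classified; the queryStatus helper is hypothetical, standing in for the real CRI round trip.

```go
// Sketch: recognizing a gRPC NotFound from a container runtime, the same
// condition logged at 12:16:25.849748 above.
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// queryStatus is a hypothetical stand-in for a CRI ContainerStatus call
// that races with container removal.
func queryStatus(id string) error {
	return status.Error(codes.NotFound, "could not find container \""+id+"\"")
}

func main() {
	err := queryStatus("003f507526dc99f895161873c068f031ad3529cf912847135409593600a77d55")
	if status.Code(err) == codes.NotFound {
		// Already removed: nothing left to delete, same outcome as the log.
		fmt.Println("container already removed; nothing to do")
	} else if err != nil {
		fmt.Println("unexpected:", err)
	}
}
```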
Jan 29 12:16:59 crc kubenswrapper[4753]: I0129 12:16:59.253925 4753 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 12:17:29 crc kubenswrapper[4753]: I0129 12:17:29.252983 4753 patch_prober.go:28] interesting pod/machine-config-daemon-7c24x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 12:17:29 crc kubenswrapper[4753]: I0129 12:17:29.253634 4753 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 12:17:29 crc kubenswrapper[4753]: I0129 12:17:29.253693 4753 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-7c24x"
Jan 29 12:17:29 crc kubenswrapper[4753]: I0129 12:17:29.254419 4753 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1e0c2015443e80d2d02eec0a9677231d797e4677debbb12d709c0bd3561be1e1"} pod="openshift-machine-config-operator/machine-config-daemon-7c24x" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 29 12:17:29 crc kubenswrapper[4753]: I0129 12:17:29.254504 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" containerName="machine-config-daemon" containerID="cri-o://1e0c2015443e80d2d02eec0a9677231d797e4677debbb12d709c0bd3561be1e1" gracePeriod=600
Jan 29 12:17:29 crc kubenswrapper[4753]: I0129 12:17:29.506317 4753 generic.go:334] "Generic (PLEG): container finished" podID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" containerID="1e0c2015443e80d2d02eec0a9677231d797e4677debbb12d709c0bd3561be1e1" exitCode=0
Jan 29 12:17:29 crc kubenswrapper[4753]: I0129 12:17:29.506411 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" event={"ID":"b0310995-a7c7-47c3-ae6c-05daaaba92a6","Type":"ContainerDied","Data":"1e0c2015443e80d2d02eec0a9677231d797e4677debbb12d709c0bd3561be1e1"}
Jan 29 12:17:29 crc kubenswrapper[4753]: I0129 12:17:29.506663 4753 scope.go:117] "RemoveContainer" containerID="427d33b9cdf4ec7ff547d57d07b232e8fe7e1dc88d7955ba9130fef447076573"
Jan 29 12:17:30 crc kubenswrapper[4753]: I0129 12:17:30.542130 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" event={"ID":"b0310995-a7c7-47c3-ae6c-05daaaba92a6","Type":"ContainerStarted","Data":"4b8aed6ea15733b89649d8b36ec89a9ec9f88b46186a18e1942be3af55f96320"}
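[Editor's note] The repeated probe failures above are HTTP GETs against 127.0.0.1:8798/health, fired every 30 seconds until the liveness threshold is crossed and the container is killed for restart. The machine-config-daemon DaemonSet spec is not part of this log, so the sketch below only shows what a probe shaped like the one being executed looks like; the threshold values are assumptions.

```go
// Sketch: a liveness probe matching the endpoint probed in the log above.
// PeriodSeconds mirrors the observed 30 s cadence; FailureThreshold is an
// illustrative assumption, not taken from the log.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	probe := &corev1.Probe{
		ProbeHandler: corev1.ProbeHandler{
			HTTPGet: &corev1.HTTPGetAction{
				Path: "/health",
				Port: intstr.FromInt(8798),
			},
		},
		PeriodSeconds:    30,
		FailureThreshold: 3,
	}
	fmt.Printf("GET %s on port %s every %ds\n",
		probe.ProbeHandler.HTTPGet.Path,
		probe.ProbeHandler.HTTPGet.Port.String(),
		probe.PeriodSeconds)
}
```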
event={"ID":"b0310995-a7c7-47c3-ae6c-05daaaba92a6","Type":"ContainerStarted","Data":"4b8aed6ea15733b89649d8b36ec89a9ec9f88b46186a18e1942be3af55f96320"} Jan 29 12:19:02 crc kubenswrapper[4753]: I0129 12:19:02.979243 4753 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Jan 29 12:19:29 crc kubenswrapper[4753]: I0129 12:19:29.253291 4753 patch_prober.go:28] interesting pod/machine-config-daemon-7c24x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 12:19:29 crc kubenswrapper[4753]: I0129 12:19:29.253977 4753 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.406721 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-nzkvz"] Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.407459 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" podUID="80bec2ab-0a88-4818-9339-760edda3b07e" containerName="ovn-controller" containerID="cri-o://05d1e99596530f467ecdae51aec8db191ac045cd85e121fc9835e2f3688f6ae7" gracePeriod=30 Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.407534 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" podUID="80bec2ab-0a88-4818-9339-760edda3b07e" containerName="nbdb" containerID="cri-o://f71859413676dab4a435d43c11ad7cba315a12de744c3c8b161273f6ad36580c" gracePeriod=30 Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.407604 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" podUID="80bec2ab-0a88-4818-9339-760edda3b07e" containerName="ovn-acl-logging" containerID="cri-o://91c1df8b6763320498420463a3841f0bfa4fcf753c355f549c3c1e93bf5206a8" gracePeriod=30 Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.407695 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" podUID="80bec2ab-0a88-4818-9339-760edda3b07e" containerName="sbdb" containerID="cri-o://8e40e5b13dfdc135ea09253d466bacdfc5c2ac3d28eabc76d833d71ac300905c" gracePeriod=30 Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.407695 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" podUID="80bec2ab-0a88-4818-9339-760edda3b07e" containerName="northd" containerID="cri-o://b0ec2a21ec299b5eed363668f39a270a8b03ea79b9de3dd23bda0c30579c3de3" gracePeriod=30 Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.407754 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" podUID="80bec2ab-0a88-4818-9339-760edda3b07e" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://1e6a7d8df812546f17b3e026005c5062086e97e3fa364b3fa22cefec025e4eba" gracePeriod=30 Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.407892 4753 kuberuntime_container.go:808] 
"Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" podUID="80bec2ab-0a88-4818-9339-760edda3b07e" containerName="kube-rbac-proxy-node" containerID="cri-o://0f15016e29f1fc14f51b4d01e772071333fd8a885cacc1804b3a03dbcfc93836" gracePeriod=30 Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.454877 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" podUID="80bec2ab-0a88-4818-9339-760edda3b07e" containerName="ovnkube-controller" containerID="cri-o://9ee2d75fd4d36ec3e097a8aa1ca55cef7754f5a5ed88d149bcf63e90d0fbf2c9" gracePeriod=30 Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.766886 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-nzkvz_80bec2ab-0a88-4818-9339-760edda3b07e/ovnkube-controller/2.log" Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.775774 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-nzkvz_80bec2ab-0a88-4818-9339-760edda3b07e/ovn-acl-logging/0.log" Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.776335 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-nzkvz_80bec2ab-0a88-4818-9339-760edda3b07e/ovn-controller/0.log" Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.776873 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.855841 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-plnc9"] Jan 29 12:19:30 crc kubenswrapper[4753]: E0129 12:19:30.856243 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80bec2ab-0a88-4818-9339-760edda3b07e" containerName="nbdb" Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.856303 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="80bec2ab-0a88-4818-9339-760edda3b07e" containerName="nbdb" Jan 29 12:19:30 crc kubenswrapper[4753]: E0129 12:19:30.856335 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80bec2ab-0a88-4818-9339-760edda3b07e" containerName="ovnkube-controller" Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.856347 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="80bec2ab-0a88-4818-9339-760edda3b07e" containerName="ovnkube-controller" Jan 29 12:19:30 crc kubenswrapper[4753]: E0129 12:19:30.856358 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80bec2ab-0a88-4818-9339-760edda3b07e" containerName="ovnkube-controller" Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.856366 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="80bec2ab-0a88-4818-9339-760edda3b07e" containerName="ovnkube-controller" Jan 29 12:19:30 crc kubenswrapper[4753]: E0129 12:19:30.856383 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80bec2ab-0a88-4818-9339-760edda3b07e" containerName="kube-rbac-proxy-ovn-metrics" Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.856394 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="80bec2ab-0a88-4818-9339-760edda3b07e" containerName="kube-rbac-proxy-ovn-metrics" Jan 29 12:19:30 crc kubenswrapper[4753]: E0129 12:19:30.856402 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df61f830-b312-4a01-8d17-057799312936" containerName="registry" Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 
12:19:30.856409 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="df61f830-b312-4a01-8d17-057799312936" containerName="registry" Jan 29 12:19:30 crc kubenswrapper[4753]: E0129 12:19:30.856419 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80bec2ab-0a88-4818-9339-760edda3b07e" containerName="ovn-controller" Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.856426 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="80bec2ab-0a88-4818-9339-760edda3b07e" containerName="ovn-controller" Jan 29 12:19:30 crc kubenswrapper[4753]: E0129 12:19:30.856438 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80bec2ab-0a88-4818-9339-760edda3b07e" containerName="kube-rbac-proxy-node" Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.856446 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="80bec2ab-0a88-4818-9339-760edda3b07e" containerName="kube-rbac-proxy-node" Jan 29 12:19:30 crc kubenswrapper[4753]: E0129 12:19:30.856456 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80bec2ab-0a88-4818-9339-760edda3b07e" containerName="kubecfg-setup" Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.856464 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="80bec2ab-0a88-4818-9339-760edda3b07e" containerName="kubecfg-setup" Jan 29 12:19:30 crc kubenswrapper[4753]: E0129 12:19:30.856475 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80bec2ab-0a88-4818-9339-760edda3b07e" containerName="sbdb" Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.856482 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="80bec2ab-0a88-4818-9339-760edda3b07e" containerName="sbdb" Jan 29 12:19:30 crc kubenswrapper[4753]: E0129 12:19:30.856490 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80bec2ab-0a88-4818-9339-760edda3b07e" containerName="northd" Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.856497 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="80bec2ab-0a88-4818-9339-760edda3b07e" containerName="northd" Jan 29 12:19:30 crc kubenswrapper[4753]: E0129 12:19:30.856507 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80bec2ab-0a88-4818-9339-760edda3b07e" containerName="ovn-acl-logging" Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.856516 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="80bec2ab-0a88-4818-9339-760edda3b07e" containerName="ovn-acl-logging" Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.856749 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="80bec2ab-0a88-4818-9339-760edda3b07e" containerName="nbdb" Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.856778 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="80bec2ab-0a88-4818-9339-760edda3b07e" containerName="ovnkube-controller" Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.856786 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="80bec2ab-0a88-4818-9339-760edda3b07e" containerName="kube-rbac-proxy-ovn-metrics" Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.856795 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="80bec2ab-0a88-4818-9339-760edda3b07e" containerName="ovn-controller" Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.856804 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="80bec2ab-0a88-4818-9339-760edda3b07e" containerName="ovnkube-controller" Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 
12:19:30.856815 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="80bec2ab-0a88-4818-9339-760edda3b07e" containerName="kube-rbac-proxy-node" Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.856824 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="80bec2ab-0a88-4818-9339-760edda3b07e" containerName="sbdb" Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.856834 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="80bec2ab-0a88-4818-9339-760edda3b07e" containerName="ovnkube-controller" Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.856843 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="80bec2ab-0a88-4818-9339-760edda3b07e" containerName="ovn-acl-logging" Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.856851 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="df61f830-b312-4a01-8d17-057799312936" containerName="registry" Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.856861 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="80bec2ab-0a88-4818-9339-760edda3b07e" containerName="northd" Jan 29 12:19:30 crc kubenswrapper[4753]: E0129 12:19:30.857003 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80bec2ab-0a88-4818-9339-760edda3b07e" containerName="ovnkube-controller" Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.857015 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="80bec2ab-0a88-4818-9339-760edda3b07e" containerName="ovnkube-controller" Jan 29 12:19:30 crc kubenswrapper[4753]: E0129 12:19:30.857025 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80bec2ab-0a88-4818-9339-760edda3b07e" containerName="ovnkube-controller" Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.857033 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="80bec2ab-0a88-4818-9339-760edda3b07e" containerName="ovnkube-controller" Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.857150 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="80bec2ab-0a88-4818-9339-760edda3b07e" containerName="ovnkube-controller" Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.859528 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.955116 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/80bec2ab-0a88-4818-9339-760edda3b07e-ovn-node-metrics-cert\") pod \"80bec2ab-0a88-4818-9339-760edda3b07e\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.955178 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-host-kubelet\") pod \"80bec2ab-0a88-4818-9339-760edda3b07e\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.955208 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-node-log\") pod \"80bec2ab-0a88-4818-9339-760edda3b07e\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.955289 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/80bec2ab-0a88-4818-9339-760edda3b07e-ovnkube-config\") pod \"80bec2ab-0a88-4818-9339-760edda3b07e\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.955319 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-log-socket\") pod \"80bec2ab-0a88-4818-9339-760edda3b07e\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.955349 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-etc-openvswitch\") pod \"80bec2ab-0a88-4818-9339-760edda3b07e\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.955353 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "80bec2ab-0a88-4818-9339-760edda3b07e" (UID: "80bec2ab-0a88-4818-9339-760edda3b07e"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.955382 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-host-cni-bin\") pod \"80bec2ab-0a88-4818-9339-760edda3b07e\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.955443 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "80bec2ab-0a88-4818-9339-760edda3b07e" (UID: "80bec2ab-0a88-4818-9339-760edda3b07e"). InnerVolumeSpecName "host-cni-bin". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.955465 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-host-run-netns\") pod \"80bec2ab-0a88-4818-9339-760edda3b07e\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.955533 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-var-lib-openvswitch\") pod \"80bec2ab-0a88-4818-9339-760edda3b07e\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.955568 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/80bec2ab-0a88-4818-9339-760edda3b07e-env-overrides\") pod \"80bec2ab-0a88-4818-9339-760edda3b07e\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.955598 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-systemd-units\") pod \"80bec2ab-0a88-4818-9339-760edda3b07e\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.955627 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-host-cni-netd\") pod \"80bec2ab-0a88-4818-9339-760edda3b07e\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.955660 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-run-systemd\") pod \"80bec2ab-0a88-4818-9339-760edda3b07e\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.955684 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-host-var-lib-cni-networks-ovn-kubernetes\") pod \"80bec2ab-0a88-4818-9339-760edda3b07e\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.955720 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nkh6d\" (UniqueName: \"kubernetes.io/projected/80bec2ab-0a88-4818-9339-760edda3b07e-kube-api-access-nkh6d\") pod \"80bec2ab-0a88-4818-9339-760edda3b07e\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.955751 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-run-ovn\") pod \"80bec2ab-0a88-4818-9339-760edda3b07e\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.955782 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: 
\"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-host-run-ovn-kubernetes\") pod \"80bec2ab-0a88-4818-9339-760edda3b07e\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.955799 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-host-slash\") pod \"80bec2ab-0a88-4818-9339-760edda3b07e\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.955813 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-run-openvswitch\") pod \"80bec2ab-0a88-4818-9339-760edda3b07e\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.955841 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/80bec2ab-0a88-4818-9339-760edda3b07e-ovnkube-script-lib\") pod \"80bec2ab-0a88-4818-9339-760edda3b07e\" (UID: \"80bec2ab-0a88-4818-9339-760edda3b07e\") " Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.955486 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-node-log" (OuterVolumeSpecName: "node-log") pod "80bec2ab-0a88-4818-9339-760edda3b07e" (UID: "80bec2ab-0a88-4818-9339-760edda3b07e"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.956110 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "80bec2ab-0a88-4818-9339-760edda3b07e" (UID: "80bec2ab-0a88-4818-9339-760edda3b07e"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.956144 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "80bec2ab-0a88-4818-9339-760edda3b07e" (UID: "80bec2ab-0a88-4818-9339-760edda3b07e"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.956121 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-log-socket" (OuterVolumeSpecName: "log-socket") pod "80bec2ab-0a88-4818-9339-760edda3b07e" (UID: "80bec2ab-0a88-4818-9339-760edda3b07e"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.956166 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "80bec2ab-0a88-4818-9339-760edda3b07e" (UID: "80bec2ab-0a88-4818-9339-760edda3b07e"). InnerVolumeSpecName "host-run-netns". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.956167 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "80bec2ab-0a88-4818-9339-760edda3b07e" (UID: "80bec2ab-0a88-4818-9339-760edda3b07e"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.956250 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "80bec2ab-0a88-4818-9339-760edda3b07e" (UID: "80bec2ab-0a88-4818-9339-760edda3b07e"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.956258 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "80bec2ab-0a88-4818-9339-760edda3b07e" (UID: "80bec2ab-0a88-4818-9339-760edda3b07e"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.956272 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-host-slash" (OuterVolumeSpecName: "host-slash") pod "80bec2ab-0a88-4818-9339-760edda3b07e" (UID: "80bec2ab-0a88-4818-9339-760edda3b07e"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.956340 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "80bec2ab-0a88-4818-9339-760edda3b07e" (UID: "80bec2ab-0a88-4818-9339-760edda3b07e"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.956361 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "80bec2ab-0a88-4818-9339-760edda3b07e" (UID: "80bec2ab-0a88-4818-9339-760edda3b07e"). InnerVolumeSpecName "systemd-units". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.956553 4753 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-host-cni-netd\") on node \"crc\" DevicePath \"\"" Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.956576 4753 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-host-kubelet\") on node \"crc\" DevicePath \"\"" Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.956585 4753 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-node-log\") on node \"crc\" DevicePath \"\"" Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.956593 4753 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-host-cni-bin\") on node \"crc\" DevicePath \"\"" Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.956602 4753 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.956612 4753 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-systemd-units\") on node \"crc\" DevicePath \"\"" Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.956800 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "80bec2ab-0a88-4818-9339-760edda3b07e" (UID: "80bec2ab-0a88-4818-9339-760edda3b07e"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.957018 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/80bec2ab-0a88-4818-9339-760edda3b07e-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "80bec2ab-0a88-4818-9339-760edda3b07e" (UID: "80bec2ab-0a88-4818-9339-760edda3b07e"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.957048 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/80bec2ab-0a88-4818-9339-760edda3b07e-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "80bec2ab-0a88-4818-9339-760edda3b07e" (UID: "80bec2ab-0a88-4818-9339-760edda3b07e"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.956828 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/80bec2ab-0a88-4818-9339-760edda3b07e-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "80bec2ab-0a88-4818-9339-760edda3b07e" (UID: "80bec2ab-0a88-4818-9339-760edda3b07e"). InnerVolumeSpecName "ovnkube-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.962260 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/80bec2ab-0a88-4818-9339-760edda3b07e-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "80bec2ab-0a88-4818-9339-760edda3b07e" (UID: "80bec2ab-0a88-4818-9339-760edda3b07e"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.962351 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/80bec2ab-0a88-4818-9339-760edda3b07e-kube-api-access-nkh6d" (OuterVolumeSpecName: "kube-api-access-nkh6d") pod "80bec2ab-0a88-4818-9339-760edda3b07e" (UID: "80bec2ab-0a88-4818-9339-760edda3b07e"). InnerVolumeSpecName "kube-api-access-nkh6d". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:19:30 crc kubenswrapper[4753]: I0129 12:19:30.970641 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "80bec2ab-0a88-4818-9339-760edda3b07e" (UID: "80bec2ab-0a88-4818-9339-760edda3b07e"). InnerVolumeSpecName "run-systemd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.057710 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-run-ovn\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.058185 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-node-log\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.058390 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-host-cni-bin\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.058476 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-host-cni-netd\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.058499 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.058519 4753 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-run-openvswitch\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.058539 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-host-run-ovn-kubernetes\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.058710 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-ovnkube-config\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.058791 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-systemd-units\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.058831 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-log-socket\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.058889 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-etc-openvswitch\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.058932 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-var-lib-openvswitch\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.058989 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-host-slash\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.059026 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-host-run-netns\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 
12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.059054 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-run-systemd\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.059087 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-ovn-node-metrics-cert\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.059133 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bkz5d\" (UniqueName: \"kubernetes.io/projected/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-kube-api-access-bkz5d\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.059166 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-env-overrides\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.059206 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-ovnkube-script-lib\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.059275 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-host-kubelet\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.059340 4753 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-run-systemd\") on node \"crc\" DevicePath \"\"" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.059364 4753 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.059394 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nkh6d\" (UniqueName: \"kubernetes.io/projected/80bec2ab-0a88-4818-9339-760edda3b07e-kube-api-access-nkh6d\") on node \"crc\" DevicePath \"\"" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.059485 4753 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-run-ovn\") on node \"crc\" DevicePath \"\"" Jan 
29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.059578 4753 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.059606 4753 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-host-slash\") on node \"crc\" DevicePath \"\"" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.059624 4753 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-run-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.059641 4753 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/80bec2ab-0a88-4818-9339-760edda3b07e-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.059659 4753 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/80bec2ab-0a88-4818-9339-760edda3b07e-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.059675 4753 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/80bec2ab-0a88-4818-9339-760edda3b07e-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.059685 4753 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-log-socket\") on node \"crc\" DevicePath \"\"" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.059696 4753 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.059710 4753 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/80bec2ab-0a88-4818-9339-760edda3b07e-host-run-netns\") on node \"crc\" DevicePath \"\"" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.059721 4753 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/80bec2ab-0a88-4818-9339-760edda3b07e-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.160461 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bkz5d\" (UniqueName: \"kubernetes.io/projected/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-kube-api-access-bkz5d\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.160526 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-env-overrides\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.160547 4753 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-ovnkube-script-lib\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.160575 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-host-kubelet\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.160597 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-run-ovn\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.160621 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-node-log\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.160642 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-host-cni-bin\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.160686 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-host-cni-netd\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.160718 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.160746 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-run-openvswitch\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.160761 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-host-run-ovn-kubernetes\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.160780 4753 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-ovnkube-config\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.160800 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-systemd-units\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.160834 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-log-socket\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.160836 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-run-ovn\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.160905 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-node-log\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.160909 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-etc-openvswitch\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.160950 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-host-cni-bin\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.160972 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-host-cni-netd\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.160994 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.161014 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: 
\"kubernetes.io/host-path/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-run-openvswitch\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.161033 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-host-run-ovn-kubernetes\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.161084 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-systemd-units\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.161148 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-log-socket\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.161178 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-host-kubelet\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.160857 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-etc-openvswitch\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.161283 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-var-lib-openvswitch\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.161346 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-host-slash\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.161392 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-host-run-netns\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.161412 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-run-systemd\") pod \"ovnkube-node-plnc9\" (UID: 
\"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.161454 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-host-slash\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.161491 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-host-run-netns\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.161496 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-var-lib-openvswitch\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.161518 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-env-overrides\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.161453 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-ovn-node-metrics-cert\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.161698 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-ovnkube-script-lib\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.161516 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-run-systemd\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.162002 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-ovnkube-config\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.175243 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-ovn-node-metrics-cert\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: 
I0129 12:19:31.179947 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bkz5d\" (UniqueName: \"kubernetes.io/projected/619dbf8f-19c9-4a31-ad2a-8a8a827a9087-kube-api-access-bkz5d\") pod \"ovnkube-node-plnc9\" (UID: \"619dbf8f-19c9-4a31-ad2a-8a8a827a9087\") " pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.449597 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-nzkvz_80bec2ab-0a88-4818-9339-760edda3b07e/ovnkube-controller/2.log" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.461744 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-nzkvz_80bec2ab-0a88-4818-9339-760edda3b07e/ovn-acl-logging/0.log" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.462161 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-nzkvz_80bec2ab-0a88-4818-9339-760edda3b07e/ovn-controller/0.log" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.462534 4753 generic.go:334] "Generic (PLEG): container finished" podID="80bec2ab-0a88-4818-9339-760edda3b07e" containerID="9ee2d75fd4d36ec3e097a8aa1ca55cef7754f5a5ed88d149bcf63e90d0fbf2c9" exitCode=0 Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.462570 4753 generic.go:334] "Generic (PLEG): container finished" podID="80bec2ab-0a88-4818-9339-760edda3b07e" containerID="8e40e5b13dfdc135ea09253d466bacdfc5c2ac3d28eabc76d833d71ac300905c" exitCode=0 Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.462579 4753 generic.go:334] "Generic (PLEG): container finished" podID="80bec2ab-0a88-4818-9339-760edda3b07e" containerID="f71859413676dab4a435d43c11ad7cba315a12de744c3c8b161273f6ad36580c" exitCode=0 Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.462586 4753 generic.go:334] "Generic (PLEG): container finished" podID="80bec2ab-0a88-4818-9339-760edda3b07e" containerID="b0ec2a21ec299b5eed363668f39a270a8b03ea79b9de3dd23bda0c30579c3de3" exitCode=0 Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.462593 4753 generic.go:334] "Generic (PLEG): container finished" podID="80bec2ab-0a88-4818-9339-760edda3b07e" containerID="1e6a7d8df812546f17b3e026005c5062086e97e3fa364b3fa22cefec025e4eba" exitCode=0 Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.462599 4753 generic.go:334] "Generic (PLEG): container finished" podID="80bec2ab-0a88-4818-9339-760edda3b07e" containerID="0f15016e29f1fc14f51b4d01e772071333fd8a885cacc1804b3a03dbcfc93836" exitCode=0 Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.462606 4753 generic.go:334] "Generic (PLEG): container finished" podID="80bec2ab-0a88-4818-9339-760edda3b07e" containerID="91c1df8b6763320498420463a3841f0bfa4fcf753c355f549c3c1e93bf5206a8" exitCode=143 Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.462614 4753 generic.go:334] "Generic (PLEG): container finished" podID="80bec2ab-0a88-4818-9339-760edda3b07e" containerID="05d1e99596530f467ecdae51aec8db191ac045cd85e121fc9835e2f3688f6ae7" exitCode=143 Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.462655 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" event={"ID":"80bec2ab-0a88-4818-9339-760edda3b07e","Type":"ContainerDied","Data":"9ee2d75fd4d36ec3e097a8aa1ca55cef7754f5a5ed88d149bcf63e90d0fbf2c9"} Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.462691 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" event={"ID":"80bec2ab-0a88-4818-9339-760edda3b07e","Type":"ContainerDied","Data":"8e40e5b13dfdc135ea09253d466bacdfc5c2ac3d28eabc76d833d71ac300905c"} Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.462703 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" event={"ID":"80bec2ab-0a88-4818-9339-760edda3b07e","Type":"ContainerDied","Data":"f71859413676dab4a435d43c11ad7cba315a12de744c3c8b161273f6ad36580c"} Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.462713 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" event={"ID":"80bec2ab-0a88-4818-9339-760edda3b07e","Type":"ContainerDied","Data":"b0ec2a21ec299b5eed363668f39a270a8b03ea79b9de3dd23bda0c30579c3de3"} Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.462722 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" event={"ID":"80bec2ab-0a88-4818-9339-760edda3b07e","Type":"ContainerDied","Data":"1e6a7d8df812546f17b3e026005c5062086e97e3fa364b3fa22cefec025e4eba"} Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.462733 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" event={"ID":"80bec2ab-0a88-4818-9339-760edda3b07e","Type":"ContainerDied","Data":"0f15016e29f1fc14f51b4d01e772071333fd8a885cacc1804b3a03dbcfc93836"} Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.462754 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e7138397dae5e90b30b0cdec02d29c108d423b3c94b07ea63e1560c4f02ed607"} Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.462769 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8e40e5b13dfdc135ea09253d466bacdfc5c2ac3d28eabc76d833d71ac300905c"} Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.462775 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f71859413676dab4a435d43c11ad7cba315a12de744c3c8b161273f6ad36580c"} Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.462781 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b0ec2a21ec299b5eed363668f39a270a8b03ea79b9de3dd23bda0c30579c3de3"} Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.462786 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1e6a7d8df812546f17b3e026005c5062086e97e3fa364b3fa22cefec025e4eba"} Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.462792 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"0f15016e29f1fc14f51b4d01e772071333fd8a885cacc1804b3a03dbcfc93836"} Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.462797 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"91c1df8b6763320498420463a3841f0bfa4fcf753c355f549c3c1e93bf5206a8"} Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.462802 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"05d1e99596530f467ecdae51aec8db191ac045cd85e121fc9835e2f3688f6ae7"} Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.462808 4753 
pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c"} Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.462815 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" event={"ID":"80bec2ab-0a88-4818-9339-760edda3b07e","Type":"ContainerDied","Data":"91c1df8b6763320498420463a3841f0bfa4fcf753c355f549c3c1e93bf5206a8"} Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.462824 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9ee2d75fd4d36ec3e097a8aa1ca55cef7754f5a5ed88d149bcf63e90d0fbf2c9"} Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.462838 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e7138397dae5e90b30b0cdec02d29c108d423b3c94b07ea63e1560c4f02ed607"} Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.462847 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8e40e5b13dfdc135ea09253d466bacdfc5c2ac3d28eabc76d833d71ac300905c"} Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.462852 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f71859413676dab4a435d43c11ad7cba315a12de744c3c8b161273f6ad36580c"} Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.462858 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b0ec2a21ec299b5eed363668f39a270a8b03ea79b9de3dd23bda0c30579c3de3"} Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.462863 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1e6a7d8df812546f17b3e026005c5062086e97e3fa364b3fa22cefec025e4eba"} Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.462868 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"0f15016e29f1fc14f51b4d01e772071333fd8a885cacc1804b3a03dbcfc93836"} Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.462874 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"91c1df8b6763320498420463a3841f0bfa4fcf753c355f549c3c1e93bf5206a8"} Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.462879 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"05d1e99596530f467ecdae51aec8db191ac045cd85e121fc9835e2f3688f6ae7"} Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.462884 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c"} Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.462891 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" event={"ID":"80bec2ab-0a88-4818-9339-760edda3b07e","Type":"ContainerDied","Data":"05d1e99596530f467ecdae51aec8db191ac045cd85e121fc9835e2f3688f6ae7"} Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.462898 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9ee2d75fd4d36ec3e097a8aa1ca55cef7754f5a5ed88d149bcf63e90d0fbf2c9"} 
Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.462906 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e7138397dae5e90b30b0cdec02d29c108d423b3c94b07ea63e1560c4f02ed607"} Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.462911 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8e40e5b13dfdc135ea09253d466bacdfc5c2ac3d28eabc76d833d71ac300905c"} Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.462916 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f71859413676dab4a435d43c11ad7cba315a12de744c3c8b161273f6ad36580c"} Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.462921 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b0ec2a21ec299b5eed363668f39a270a8b03ea79b9de3dd23bda0c30579c3de3"} Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.462927 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1e6a7d8df812546f17b3e026005c5062086e97e3fa364b3fa22cefec025e4eba"} Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.462932 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"0f15016e29f1fc14f51b4d01e772071333fd8a885cacc1804b3a03dbcfc93836"} Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.462937 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"91c1df8b6763320498420463a3841f0bfa4fcf753c355f549c3c1e93bf5206a8"} Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.462942 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"05d1e99596530f467ecdae51aec8db191ac045cd85e121fc9835e2f3688f6ae7"} Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.462947 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c"} Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.462954 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" event={"ID":"80bec2ab-0a88-4818-9339-760edda3b07e","Type":"ContainerDied","Data":"97c76242144b0e1c85e0be3bcdd7a4d811921f97855ef3ddd033bad24283bf21"} Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.462962 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9ee2d75fd4d36ec3e097a8aa1ca55cef7754f5a5ed88d149bcf63e90d0fbf2c9"} Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.462967 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e7138397dae5e90b30b0cdec02d29c108d423b3c94b07ea63e1560c4f02ed607"} Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.462972 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8e40e5b13dfdc135ea09253d466bacdfc5c2ac3d28eabc76d833d71ac300905c"} Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.462979 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f71859413676dab4a435d43c11ad7cba315a12de744c3c8b161273f6ad36580c"} 
Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.462984 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b0ec2a21ec299b5eed363668f39a270a8b03ea79b9de3dd23bda0c30579c3de3"} Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.462989 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1e6a7d8df812546f17b3e026005c5062086e97e3fa364b3fa22cefec025e4eba"} Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.462994 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"0f15016e29f1fc14f51b4d01e772071333fd8a885cacc1804b3a03dbcfc93836"} Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.462999 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"91c1df8b6763320498420463a3841f0bfa4fcf753c355f549c3c1e93bf5206a8"} Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.463005 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"05d1e99596530f467ecdae51aec8db191ac045cd85e121fc9835e2f3688f6ae7"} Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.463010 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c"} Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.463052 4753 scope.go:117] "RemoveContainer" containerID="9ee2d75fd4d36ec3e097a8aa1ca55cef7754f5a5ed88d149bcf63e90d0fbf2c9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.463264 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-nzkvz" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.467303 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rnbz9_b372210b-6e1b-4a80-b379-7c1d570712f3/kube-multus/1.log" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.468383 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rnbz9_b372210b-6e1b-4a80-b379-7c1d570712f3/kube-multus/0.log" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.468618 4753 generic.go:334] "Generic (PLEG): container finished" podID="b372210b-6e1b-4a80-b379-7c1d570712f3" containerID="32065049674d4bf4bc624652e7d77fa28f08e40f89d5dd3bf75f0bc7038d35bf" exitCode=2 Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.468643 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-rnbz9" event={"ID":"b372210b-6e1b-4a80-b379-7c1d570712f3","Type":"ContainerDied","Data":"32065049674d4bf4bc624652e7d77fa28f08e40f89d5dd3bf75f0bc7038d35bf"} Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.468694 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"346c36936603f4535d5ad3e6b60e451b05b4d376e2531c813f0c900c92caf5fd"} Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.469281 4753 scope.go:117] "RemoveContainer" containerID="32065049674d4bf4bc624652e7d77fa28f08e40f89d5dd3bf75f0bc7038d35bf" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.480581 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.500767 4753 scope.go:117] "RemoveContainer" containerID="e7138397dae5e90b30b0cdec02d29c108d423b3c94b07ea63e1560c4f02ed607" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.521425 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-nzkvz"] Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.527744 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-nzkvz"] Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.551415 4753 scope.go:117] "RemoveContainer" containerID="8e40e5b13dfdc135ea09253d466bacdfc5c2ac3d28eabc76d833d71ac300905c" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.598547 4753 scope.go:117] "RemoveContainer" containerID="f71859413676dab4a435d43c11ad7cba315a12de744c3c8b161273f6ad36580c" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.637737 4753 scope.go:117] "RemoveContainer" containerID="b0ec2a21ec299b5eed363668f39a270a8b03ea79b9de3dd23bda0c30579c3de3" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.751456 4753 scope.go:117] "RemoveContainer" containerID="1e6a7d8df812546f17b3e026005c5062086e97e3fa364b3fa22cefec025e4eba" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.782422 4753 scope.go:117] "RemoveContainer" containerID="0f15016e29f1fc14f51b4d01e772071333fd8a885cacc1804b3a03dbcfc93836" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.796968 4753 scope.go:117] "RemoveContainer" containerID="91c1df8b6763320498420463a3841f0bfa4fcf753c355f549c3c1e93bf5206a8" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.811798 4753 scope.go:117] "RemoveContainer" containerID="05d1e99596530f467ecdae51aec8db191ac045cd85e121fc9835e2f3688f6ae7" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.829606 4753 scope.go:117] "RemoveContainer" containerID="87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.891325 4753 scope.go:117] "RemoveContainer" containerID="9ee2d75fd4d36ec3e097a8aa1ca55cef7754f5a5ed88d149bcf63e90d0fbf2c9" Jan 29 12:19:31 crc kubenswrapper[4753]: E0129 12:19:31.893188 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9ee2d75fd4d36ec3e097a8aa1ca55cef7754f5a5ed88d149bcf63e90d0fbf2c9\": container with ID starting with 9ee2d75fd4d36ec3e097a8aa1ca55cef7754f5a5ed88d149bcf63e90d0fbf2c9 not found: ID does not exist" containerID="9ee2d75fd4d36ec3e097a8aa1ca55cef7754f5a5ed88d149bcf63e90d0fbf2c9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.893290 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9ee2d75fd4d36ec3e097a8aa1ca55cef7754f5a5ed88d149bcf63e90d0fbf2c9"} err="failed to get container status \"9ee2d75fd4d36ec3e097a8aa1ca55cef7754f5a5ed88d149bcf63e90d0fbf2c9\": rpc error: code = NotFound desc = could not find container \"9ee2d75fd4d36ec3e097a8aa1ca55cef7754f5a5ed88d149bcf63e90d0fbf2c9\": container with ID starting with 9ee2d75fd4d36ec3e097a8aa1ca55cef7754f5a5ed88d149bcf63e90d0fbf2c9 not found: ID does not exist" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.893342 4753 scope.go:117] "RemoveContainer" containerID="e7138397dae5e90b30b0cdec02d29c108d423b3c94b07ea63e1560c4f02ed607" Jan 29 12:19:31 crc kubenswrapper[4753]: E0129 12:19:31.894024 4753 log.go:32] "ContainerStatus from runtime 
service failed" err="rpc error: code = NotFound desc = could not find container \"e7138397dae5e90b30b0cdec02d29c108d423b3c94b07ea63e1560c4f02ed607\": container with ID starting with e7138397dae5e90b30b0cdec02d29c108d423b3c94b07ea63e1560c4f02ed607 not found: ID does not exist" containerID="e7138397dae5e90b30b0cdec02d29c108d423b3c94b07ea63e1560c4f02ed607" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.894100 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e7138397dae5e90b30b0cdec02d29c108d423b3c94b07ea63e1560c4f02ed607"} err="failed to get container status \"e7138397dae5e90b30b0cdec02d29c108d423b3c94b07ea63e1560c4f02ed607\": rpc error: code = NotFound desc = could not find container \"e7138397dae5e90b30b0cdec02d29c108d423b3c94b07ea63e1560c4f02ed607\": container with ID starting with e7138397dae5e90b30b0cdec02d29c108d423b3c94b07ea63e1560c4f02ed607 not found: ID does not exist" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.894168 4753 scope.go:117] "RemoveContainer" containerID="8e40e5b13dfdc135ea09253d466bacdfc5c2ac3d28eabc76d833d71ac300905c" Jan 29 12:19:31 crc kubenswrapper[4753]: E0129 12:19:31.894660 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8e40e5b13dfdc135ea09253d466bacdfc5c2ac3d28eabc76d833d71ac300905c\": container with ID starting with 8e40e5b13dfdc135ea09253d466bacdfc5c2ac3d28eabc76d833d71ac300905c not found: ID does not exist" containerID="8e40e5b13dfdc135ea09253d466bacdfc5c2ac3d28eabc76d833d71ac300905c" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.894716 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8e40e5b13dfdc135ea09253d466bacdfc5c2ac3d28eabc76d833d71ac300905c"} err="failed to get container status \"8e40e5b13dfdc135ea09253d466bacdfc5c2ac3d28eabc76d833d71ac300905c\": rpc error: code = NotFound desc = could not find container \"8e40e5b13dfdc135ea09253d466bacdfc5c2ac3d28eabc76d833d71ac300905c\": container with ID starting with 8e40e5b13dfdc135ea09253d466bacdfc5c2ac3d28eabc76d833d71ac300905c not found: ID does not exist" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.894751 4753 scope.go:117] "RemoveContainer" containerID="f71859413676dab4a435d43c11ad7cba315a12de744c3c8b161273f6ad36580c" Jan 29 12:19:31 crc kubenswrapper[4753]: E0129 12:19:31.895666 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f71859413676dab4a435d43c11ad7cba315a12de744c3c8b161273f6ad36580c\": container with ID starting with f71859413676dab4a435d43c11ad7cba315a12de744c3c8b161273f6ad36580c not found: ID does not exist" containerID="f71859413676dab4a435d43c11ad7cba315a12de744c3c8b161273f6ad36580c" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.895727 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f71859413676dab4a435d43c11ad7cba315a12de744c3c8b161273f6ad36580c"} err="failed to get container status \"f71859413676dab4a435d43c11ad7cba315a12de744c3c8b161273f6ad36580c\": rpc error: code = NotFound desc = could not find container \"f71859413676dab4a435d43c11ad7cba315a12de744c3c8b161273f6ad36580c\": container with ID starting with f71859413676dab4a435d43c11ad7cba315a12de744c3c8b161273f6ad36580c not found: ID does not exist" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.895764 4753 scope.go:117] "RemoveContainer" 
containerID="b0ec2a21ec299b5eed363668f39a270a8b03ea79b9de3dd23bda0c30579c3de3" Jan 29 12:19:31 crc kubenswrapper[4753]: E0129 12:19:31.896606 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b0ec2a21ec299b5eed363668f39a270a8b03ea79b9de3dd23bda0c30579c3de3\": container with ID starting with b0ec2a21ec299b5eed363668f39a270a8b03ea79b9de3dd23bda0c30579c3de3 not found: ID does not exist" containerID="b0ec2a21ec299b5eed363668f39a270a8b03ea79b9de3dd23bda0c30579c3de3" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.896629 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b0ec2a21ec299b5eed363668f39a270a8b03ea79b9de3dd23bda0c30579c3de3"} err="failed to get container status \"b0ec2a21ec299b5eed363668f39a270a8b03ea79b9de3dd23bda0c30579c3de3\": rpc error: code = NotFound desc = could not find container \"b0ec2a21ec299b5eed363668f39a270a8b03ea79b9de3dd23bda0c30579c3de3\": container with ID starting with b0ec2a21ec299b5eed363668f39a270a8b03ea79b9de3dd23bda0c30579c3de3 not found: ID does not exist" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.896643 4753 scope.go:117] "RemoveContainer" containerID="1e6a7d8df812546f17b3e026005c5062086e97e3fa364b3fa22cefec025e4eba" Jan 29 12:19:31 crc kubenswrapper[4753]: E0129 12:19:31.897047 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1e6a7d8df812546f17b3e026005c5062086e97e3fa364b3fa22cefec025e4eba\": container with ID starting with 1e6a7d8df812546f17b3e026005c5062086e97e3fa364b3fa22cefec025e4eba not found: ID does not exist" containerID="1e6a7d8df812546f17b3e026005c5062086e97e3fa364b3fa22cefec025e4eba" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.897072 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1e6a7d8df812546f17b3e026005c5062086e97e3fa364b3fa22cefec025e4eba"} err="failed to get container status \"1e6a7d8df812546f17b3e026005c5062086e97e3fa364b3fa22cefec025e4eba\": rpc error: code = NotFound desc = could not find container \"1e6a7d8df812546f17b3e026005c5062086e97e3fa364b3fa22cefec025e4eba\": container with ID starting with 1e6a7d8df812546f17b3e026005c5062086e97e3fa364b3fa22cefec025e4eba not found: ID does not exist" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.897086 4753 scope.go:117] "RemoveContainer" containerID="0f15016e29f1fc14f51b4d01e772071333fd8a885cacc1804b3a03dbcfc93836" Jan 29 12:19:31 crc kubenswrapper[4753]: E0129 12:19:31.897361 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0f15016e29f1fc14f51b4d01e772071333fd8a885cacc1804b3a03dbcfc93836\": container with ID starting with 0f15016e29f1fc14f51b4d01e772071333fd8a885cacc1804b3a03dbcfc93836 not found: ID does not exist" containerID="0f15016e29f1fc14f51b4d01e772071333fd8a885cacc1804b3a03dbcfc93836" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.897389 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0f15016e29f1fc14f51b4d01e772071333fd8a885cacc1804b3a03dbcfc93836"} err="failed to get container status \"0f15016e29f1fc14f51b4d01e772071333fd8a885cacc1804b3a03dbcfc93836\": rpc error: code = NotFound desc = could not find container \"0f15016e29f1fc14f51b4d01e772071333fd8a885cacc1804b3a03dbcfc93836\": container with ID starting with 
0f15016e29f1fc14f51b4d01e772071333fd8a885cacc1804b3a03dbcfc93836 not found: ID does not exist" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.897407 4753 scope.go:117] "RemoveContainer" containerID="91c1df8b6763320498420463a3841f0bfa4fcf753c355f549c3c1e93bf5206a8" Jan 29 12:19:31 crc kubenswrapper[4753]: E0129 12:19:31.897672 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"91c1df8b6763320498420463a3841f0bfa4fcf753c355f549c3c1e93bf5206a8\": container with ID starting with 91c1df8b6763320498420463a3841f0bfa4fcf753c355f549c3c1e93bf5206a8 not found: ID does not exist" containerID="91c1df8b6763320498420463a3841f0bfa4fcf753c355f549c3c1e93bf5206a8" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.897720 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"91c1df8b6763320498420463a3841f0bfa4fcf753c355f549c3c1e93bf5206a8"} err="failed to get container status \"91c1df8b6763320498420463a3841f0bfa4fcf753c355f549c3c1e93bf5206a8\": rpc error: code = NotFound desc = could not find container \"91c1df8b6763320498420463a3841f0bfa4fcf753c355f549c3c1e93bf5206a8\": container with ID starting with 91c1df8b6763320498420463a3841f0bfa4fcf753c355f549c3c1e93bf5206a8 not found: ID does not exist" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.897735 4753 scope.go:117] "RemoveContainer" containerID="05d1e99596530f467ecdae51aec8db191ac045cd85e121fc9835e2f3688f6ae7" Jan 29 12:19:31 crc kubenswrapper[4753]: E0129 12:19:31.898098 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"05d1e99596530f467ecdae51aec8db191ac045cd85e121fc9835e2f3688f6ae7\": container with ID starting with 05d1e99596530f467ecdae51aec8db191ac045cd85e121fc9835e2f3688f6ae7 not found: ID does not exist" containerID="05d1e99596530f467ecdae51aec8db191ac045cd85e121fc9835e2f3688f6ae7" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.898122 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"05d1e99596530f467ecdae51aec8db191ac045cd85e121fc9835e2f3688f6ae7"} err="failed to get container status \"05d1e99596530f467ecdae51aec8db191ac045cd85e121fc9835e2f3688f6ae7\": rpc error: code = NotFound desc = could not find container \"05d1e99596530f467ecdae51aec8db191ac045cd85e121fc9835e2f3688f6ae7\": container with ID starting with 05d1e99596530f467ecdae51aec8db191ac045cd85e121fc9835e2f3688f6ae7 not found: ID does not exist" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.898139 4753 scope.go:117] "RemoveContainer" containerID="87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c" Jan 29 12:19:31 crc kubenswrapper[4753]: E0129 12:19:31.898399 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\": container with ID starting with 87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c not found: ID does not exist" containerID="87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.898417 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c"} err="failed to get container status \"87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\": rpc 
error: code = NotFound desc = could not find container \"87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\": container with ID starting with 87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c not found: ID does not exist" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.898436 4753 scope.go:117] "RemoveContainer" containerID="9ee2d75fd4d36ec3e097a8aa1ca55cef7754f5a5ed88d149bcf63e90d0fbf2c9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.898828 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9ee2d75fd4d36ec3e097a8aa1ca55cef7754f5a5ed88d149bcf63e90d0fbf2c9"} err="failed to get container status \"9ee2d75fd4d36ec3e097a8aa1ca55cef7754f5a5ed88d149bcf63e90d0fbf2c9\": rpc error: code = NotFound desc = could not find container \"9ee2d75fd4d36ec3e097a8aa1ca55cef7754f5a5ed88d149bcf63e90d0fbf2c9\": container with ID starting with 9ee2d75fd4d36ec3e097a8aa1ca55cef7754f5a5ed88d149bcf63e90d0fbf2c9 not found: ID does not exist" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.898851 4753 scope.go:117] "RemoveContainer" containerID="e7138397dae5e90b30b0cdec02d29c108d423b3c94b07ea63e1560c4f02ed607" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.899143 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e7138397dae5e90b30b0cdec02d29c108d423b3c94b07ea63e1560c4f02ed607"} err="failed to get container status \"e7138397dae5e90b30b0cdec02d29c108d423b3c94b07ea63e1560c4f02ed607\": rpc error: code = NotFound desc = could not find container \"e7138397dae5e90b30b0cdec02d29c108d423b3c94b07ea63e1560c4f02ed607\": container with ID starting with e7138397dae5e90b30b0cdec02d29c108d423b3c94b07ea63e1560c4f02ed607 not found: ID does not exist" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.899161 4753 scope.go:117] "RemoveContainer" containerID="8e40e5b13dfdc135ea09253d466bacdfc5c2ac3d28eabc76d833d71ac300905c" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.899452 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8e40e5b13dfdc135ea09253d466bacdfc5c2ac3d28eabc76d833d71ac300905c"} err="failed to get container status \"8e40e5b13dfdc135ea09253d466bacdfc5c2ac3d28eabc76d833d71ac300905c\": rpc error: code = NotFound desc = could not find container \"8e40e5b13dfdc135ea09253d466bacdfc5c2ac3d28eabc76d833d71ac300905c\": container with ID starting with 8e40e5b13dfdc135ea09253d466bacdfc5c2ac3d28eabc76d833d71ac300905c not found: ID does not exist" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.899491 4753 scope.go:117] "RemoveContainer" containerID="f71859413676dab4a435d43c11ad7cba315a12de744c3c8b161273f6ad36580c" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.900950 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="80bec2ab-0a88-4818-9339-760edda3b07e" path="/var/lib/kubelet/pods/80bec2ab-0a88-4818-9339-760edda3b07e/volumes" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.902324 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f71859413676dab4a435d43c11ad7cba315a12de744c3c8b161273f6ad36580c"} err="failed to get container status \"f71859413676dab4a435d43c11ad7cba315a12de744c3c8b161273f6ad36580c\": rpc error: code = NotFound desc = could not find container \"f71859413676dab4a435d43c11ad7cba315a12de744c3c8b161273f6ad36580c\": container with ID starting with 
f71859413676dab4a435d43c11ad7cba315a12de744c3c8b161273f6ad36580c not found: ID does not exist" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.902347 4753 scope.go:117] "RemoveContainer" containerID="b0ec2a21ec299b5eed363668f39a270a8b03ea79b9de3dd23bda0c30579c3de3" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.902620 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b0ec2a21ec299b5eed363668f39a270a8b03ea79b9de3dd23bda0c30579c3de3"} err="failed to get container status \"b0ec2a21ec299b5eed363668f39a270a8b03ea79b9de3dd23bda0c30579c3de3\": rpc error: code = NotFound desc = could not find container \"b0ec2a21ec299b5eed363668f39a270a8b03ea79b9de3dd23bda0c30579c3de3\": container with ID starting with b0ec2a21ec299b5eed363668f39a270a8b03ea79b9de3dd23bda0c30579c3de3 not found: ID does not exist" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.902637 4753 scope.go:117] "RemoveContainer" containerID="1e6a7d8df812546f17b3e026005c5062086e97e3fa364b3fa22cefec025e4eba" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.903058 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1e6a7d8df812546f17b3e026005c5062086e97e3fa364b3fa22cefec025e4eba"} err="failed to get container status \"1e6a7d8df812546f17b3e026005c5062086e97e3fa364b3fa22cefec025e4eba\": rpc error: code = NotFound desc = could not find container \"1e6a7d8df812546f17b3e026005c5062086e97e3fa364b3fa22cefec025e4eba\": container with ID starting with 1e6a7d8df812546f17b3e026005c5062086e97e3fa364b3fa22cefec025e4eba not found: ID does not exist" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.903082 4753 scope.go:117] "RemoveContainer" containerID="0f15016e29f1fc14f51b4d01e772071333fd8a885cacc1804b3a03dbcfc93836" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.903653 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0f15016e29f1fc14f51b4d01e772071333fd8a885cacc1804b3a03dbcfc93836"} err="failed to get container status \"0f15016e29f1fc14f51b4d01e772071333fd8a885cacc1804b3a03dbcfc93836\": rpc error: code = NotFound desc = could not find container \"0f15016e29f1fc14f51b4d01e772071333fd8a885cacc1804b3a03dbcfc93836\": container with ID starting with 0f15016e29f1fc14f51b4d01e772071333fd8a885cacc1804b3a03dbcfc93836 not found: ID does not exist" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.903676 4753 scope.go:117] "RemoveContainer" containerID="91c1df8b6763320498420463a3841f0bfa4fcf753c355f549c3c1e93bf5206a8" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.904085 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"91c1df8b6763320498420463a3841f0bfa4fcf753c355f549c3c1e93bf5206a8"} err="failed to get container status \"91c1df8b6763320498420463a3841f0bfa4fcf753c355f549c3c1e93bf5206a8\": rpc error: code = NotFound desc = could not find container \"91c1df8b6763320498420463a3841f0bfa4fcf753c355f549c3c1e93bf5206a8\": container with ID starting with 91c1df8b6763320498420463a3841f0bfa4fcf753c355f549c3c1e93bf5206a8 not found: ID does not exist" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.904125 4753 scope.go:117] "RemoveContainer" containerID="05d1e99596530f467ecdae51aec8db191ac045cd85e121fc9835e2f3688f6ae7" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.904371 4753 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"05d1e99596530f467ecdae51aec8db191ac045cd85e121fc9835e2f3688f6ae7"} err="failed to get container status \"05d1e99596530f467ecdae51aec8db191ac045cd85e121fc9835e2f3688f6ae7\": rpc error: code = NotFound desc = could not find container \"05d1e99596530f467ecdae51aec8db191ac045cd85e121fc9835e2f3688f6ae7\": container with ID starting with 05d1e99596530f467ecdae51aec8db191ac045cd85e121fc9835e2f3688f6ae7 not found: ID does not exist" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.904400 4753 scope.go:117] "RemoveContainer" containerID="87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.904596 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c"} err="failed to get container status \"87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\": rpc error: code = NotFound desc = could not find container \"87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\": container with ID starting with 87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c not found: ID does not exist" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.904612 4753 scope.go:117] "RemoveContainer" containerID="9ee2d75fd4d36ec3e097a8aa1ca55cef7754f5a5ed88d149bcf63e90d0fbf2c9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.904890 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9ee2d75fd4d36ec3e097a8aa1ca55cef7754f5a5ed88d149bcf63e90d0fbf2c9"} err="failed to get container status \"9ee2d75fd4d36ec3e097a8aa1ca55cef7754f5a5ed88d149bcf63e90d0fbf2c9\": rpc error: code = NotFound desc = could not find container \"9ee2d75fd4d36ec3e097a8aa1ca55cef7754f5a5ed88d149bcf63e90d0fbf2c9\": container with ID starting with 9ee2d75fd4d36ec3e097a8aa1ca55cef7754f5a5ed88d149bcf63e90d0fbf2c9 not found: ID does not exist" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.904906 4753 scope.go:117] "RemoveContainer" containerID="e7138397dae5e90b30b0cdec02d29c108d423b3c94b07ea63e1560c4f02ed607" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.905174 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e7138397dae5e90b30b0cdec02d29c108d423b3c94b07ea63e1560c4f02ed607"} err="failed to get container status \"e7138397dae5e90b30b0cdec02d29c108d423b3c94b07ea63e1560c4f02ed607\": rpc error: code = NotFound desc = could not find container \"e7138397dae5e90b30b0cdec02d29c108d423b3c94b07ea63e1560c4f02ed607\": container with ID starting with e7138397dae5e90b30b0cdec02d29c108d423b3c94b07ea63e1560c4f02ed607 not found: ID does not exist" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.905191 4753 scope.go:117] "RemoveContainer" containerID="8e40e5b13dfdc135ea09253d466bacdfc5c2ac3d28eabc76d833d71ac300905c" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.905452 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8e40e5b13dfdc135ea09253d466bacdfc5c2ac3d28eabc76d833d71ac300905c"} err="failed to get container status \"8e40e5b13dfdc135ea09253d466bacdfc5c2ac3d28eabc76d833d71ac300905c\": rpc error: code = NotFound desc = could not find container \"8e40e5b13dfdc135ea09253d466bacdfc5c2ac3d28eabc76d833d71ac300905c\": container with ID starting with 8e40e5b13dfdc135ea09253d466bacdfc5c2ac3d28eabc76d833d71ac300905c not found: ID does not exist" Jan 
29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.905467 4753 scope.go:117] "RemoveContainer" containerID="f71859413676dab4a435d43c11ad7cba315a12de744c3c8b161273f6ad36580c" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.905916 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f71859413676dab4a435d43c11ad7cba315a12de744c3c8b161273f6ad36580c"} err="failed to get container status \"f71859413676dab4a435d43c11ad7cba315a12de744c3c8b161273f6ad36580c\": rpc error: code = NotFound desc = could not find container \"f71859413676dab4a435d43c11ad7cba315a12de744c3c8b161273f6ad36580c\": container with ID starting with f71859413676dab4a435d43c11ad7cba315a12de744c3c8b161273f6ad36580c not found: ID does not exist" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.905957 4753 scope.go:117] "RemoveContainer" containerID="b0ec2a21ec299b5eed363668f39a270a8b03ea79b9de3dd23bda0c30579c3de3" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.906185 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b0ec2a21ec299b5eed363668f39a270a8b03ea79b9de3dd23bda0c30579c3de3"} err="failed to get container status \"b0ec2a21ec299b5eed363668f39a270a8b03ea79b9de3dd23bda0c30579c3de3\": rpc error: code = NotFound desc = could not find container \"b0ec2a21ec299b5eed363668f39a270a8b03ea79b9de3dd23bda0c30579c3de3\": container with ID starting with b0ec2a21ec299b5eed363668f39a270a8b03ea79b9de3dd23bda0c30579c3de3 not found: ID does not exist" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.906202 4753 scope.go:117] "RemoveContainer" containerID="1e6a7d8df812546f17b3e026005c5062086e97e3fa364b3fa22cefec025e4eba" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.906420 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1e6a7d8df812546f17b3e026005c5062086e97e3fa364b3fa22cefec025e4eba"} err="failed to get container status \"1e6a7d8df812546f17b3e026005c5062086e97e3fa364b3fa22cefec025e4eba\": rpc error: code = NotFound desc = could not find container \"1e6a7d8df812546f17b3e026005c5062086e97e3fa364b3fa22cefec025e4eba\": container with ID starting with 1e6a7d8df812546f17b3e026005c5062086e97e3fa364b3fa22cefec025e4eba not found: ID does not exist" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.906438 4753 scope.go:117] "RemoveContainer" containerID="0f15016e29f1fc14f51b4d01e772071333fd8a885cacc1804b3a03dbcfc93836" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.906691 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0f15016e29f1fc14f51b4d01e772071333fd8a885cacc1804b3a03dbcfc93836"} err="failed to get container status \"0f15016e29f1fc14f51b4d01e772071333fd8a885cacc1804b3a03dbcfc93836\": rpc error: code = NotFound desc = could not find container \"0f15016e29f1fc14f51b4d01e772071333fd8a885cacc1804b3a03dbcfc93836\": container with ID starting with 0f15016e29f1fc14f51b4d01e772071333fd8a885cacc1804b3a03dbcfc93836 not found: ID does not exist" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.906707 4753 scope.go:117] "RemoveContainer" containerID="91c1df8b6763320498420463a3841f0bfa4fcf753c355f549c3c1e93bf5206a8" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.906934 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"91c1df8b6763320498420463a3841f0bfa4fcf753c355f549c3c1e93bf5206a8"} err="failed to get container status 
\"91c1df8b6763320498420463a3841f0bfa4fcf753c355f549c3c1e93bf5206a8\": rpc error: code = NotFound desc = could not find container \"91c1df8b6763320498420463a3841f0bfa4fcf753c355f549c3c1e93bf5206a8\": container with ID starting with 91c1df8b6763320498420463a3841f0bfa4fcf753c355f549c3c1e93bf5206a8 not found: ID does not exist" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.906954 4753 scope.go:117] "RemoveContainer" containerID="05d1e99596530f467ecdae51aec8db191ac045cd85e121fc9835e2f3688f6ae7" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.907147 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"05d1e99596530f467ecdae51aec8db191ac045cd85e121fc9835e2f3688f6ae7"} err="failed to get container status \"05d1e99596530f467ecdae51aec8db191ac045cd85e121fc9835e2f3688f6ae7\": rpc error: code = NotFound desc = could not find container \"05d1e99596530f467ecdae51aec8db191ac045cd85e121fc9835e2f3688f6ae7\": container with ID starting with 05d1e99596530f467ecdae51aec8db191ac045cd85e121fc9835e2f3688f6ae7 not found: ID does not exist" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.907164 4753 scope.go:117] "RemoveContainer" containerID="87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.907425 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c"} err="failed to get container status \"87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\": rpc error: code = NotFound desc = could not find container \"87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\": container with ID starting with 87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c not found: ID does not exist" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.907442 4753 scope.go:117] "RemoveContainer" containerID="9ee2d75fd4d36ec3e097a8aa1ca55cef7754f5a5ed88d149bcf63e90d0fbf2c9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.907749 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9ee2d75fd4d36ec3e097a8aa1ca55cef7754f5a5ed88d149bcf63e90d0fbf2c9"} err="failed to get container status \"9ee2d75fd4d36ec3e097a8aa1ca55cef7754f5a5ed88d149bcf63e90d0fbf2c9\": rpc error: code = NotFound desc = could not find container \"9ee2d75fd4d36ec3e097a8aa1ca55cef7754f5a5ed88d149bcf63e90d0fbf2c9\": container with ID starting with 9ee2d75fd4d36ec3e097a8aa1ca55cef7754f5a5ed88d149bcf63e90d0fbf2c9 not found: ID does not exist" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.907768 4753 scope.go:117] "RemoveContainer" containerID="e7138397dae5e90b30b0cdec02d29c108d423b3c94b07ea63e1560c4f02ed607" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.908011 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e7138397dae5e90b30b0cdec02d29c108d423b3c94b07ea63e1560c4f02ed607"} err="failed to get container status \"e7138397dae5e90b30b0cdec02d29c108d423b3c94b07ea63e1560c4f02ed607\": rpc error: code = NotFound desc = could not find container \"e7138397dae5e90b30b0cdec02d29c108d423b3c94b07ea63e1560c4f02ed607\": container with ID starting with e7138397dae5e90b30b0cdec02d29c108d423b3c94b07ea63e1560c4f02ed607 not found: ID does not exist" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.908024 4753 scope.go:117] "RemoveContainer" 
containerID="8e40e5b13dfdc135ea09253d466bacdfc5c2ac3d28eabc76d833d71ac300905c" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.908367 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8e40e5b13dfdc135ea09253d466bacdfc5c2ac3d28eabc76d833d71ac300905c"} err="failed to get container status \"8e40e5b13dfdc135ea09253d466bacdfc5c2ac3d28eabc76d833d71ac300905c\": rpc error: code = NotFound desc = could not find container \"8e40e5b13dfdc135ea09253d466bacdfc5c2ac3d28eabc76d833d71ac300905c\": container with ID starting with 8e40e5b13dfdc135ea09253d466bacdfc5c2ac3d28eabc76d833d71ac300905c not found: ID does not exist" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.908378 4753 scope.go:117] "RemoveContainer" containerID="f71859413676dab4a435d43c11ad7cba315a12de744c3c8b161273f6ad36580c" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.908637 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f71859413676dab4a435d43c11ad7cba315a12de744c3c8b161273f6ad36580c"} err="failed to get container status \"f71859413676dab4a435d43c11ad7cba315a12de744c3c8b161273f6ad36580c\": rpc error: code = NotFound desc = could not find container \"f71859413676dab4a435d43c11ad7cba315a12de744c3c8b161273f6ad36580c\": container with ID starting with f71859413676dab4a435d43c11ad7cba315a12de744c3c8b161273f6ad36580c not found: ID does not exist" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.908653 4753 scope.go:117] "RemoveContainer" containerID="b0ec2a21ec299b5eed363668f39a270a8b03ea79b9de3dd23bda0c30579c3de3" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.908873 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b0ec2a21ec299b5eed363668f39a270a8b03ea79b9de3dd23bda0c30579c3de3"} err="failed to get container status \"b0ec2a21ec299b5eed363668f39a270a8b03ea79b9de3dd23bda0c30579c3de3\": rpc error: code = NotFound desc = could not find container \"b0ec2a21ec299b5eed363668f39a270a8b03ea79b9de3dd23bda0c30579c3de3\": container with ID starting with b0ec2a21ec299b5eed363668f39a270a8b03ea79b9de3dd23bda0c30579c3de3 not found: ID does not exist" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.908887 4753 scope.go:117] "RemoveContainer" containerID="1e6a7d8df812546f17b3e026005c5062086e97e3fa364b3fa22cefec025e4eba" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.909079 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1e6a7d8df812546f17b3e026005c5062086e97e3fa364b3fa22cefec025e4eba"} err="failed to get container status \"1e6a7d8df812546f17b3e026005c5062086e97e3fa364b3fa22cefec025e4eba\": rpc error: code = NotFound desc = could not find container \"1e6a7d8df812546f17b3e026005c5062086e97e3fa364b3fa22cefec025e4eba\": container with ID starting with 1e6a7d8df812546f17b3e026005c5062086e97e3fa364b3fa22cefec025e4eba not found: ID does not exist" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.909093 4753 scope.go:117] "RemoveContainer" containerID="0f15016e29f1fc14f51b4d01e772071333fd8a885cacc1804b3a03dbcfc93836" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.909348 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0f15016e29f1fc14f51b4d01e772071333fd8a885cacc1804b3a03dbcfc93836"} err="failed to get container status \"0f15016e29f1fc14f51b4d01e772071333fd8a885cacc1804b3a03dbcfc93836\": rpc error: code = NotFound desc = could not find 
container \"0f15016e29f1fc14f51b4d01e772071333fd8a885cacc1804b3a03dbcfc93836\": container with ID starting with 0f15016e29f1fc14f51b4d01e772071333fd8a885cacc1804b3a03dbcfc93836 not found: ID does not exist" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.909363 4753 scope.go:117] "RemoveContainer" containerID="91c1df8b6763320498420463a3841f0bfa4fcf753c355f549c3c1e93bf5206a8" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.909567 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"91c1df8b6763320498420463a3841f0bfa4fcf753c355f549c3c1e93bf5206a8"} err="failed to get container status \"91c1df8b6763320498420463a3841f0bfa4fcf753c355f549c3c1e93bf5206a8\": rpc error: code = NotFound desc = could not find container \"91c1df8b6763320498420463a3841f0bfa4fcf753c355f549c3c1e93bf5206a8\": container with ID starting with 91c1df8b6763320498420463a3841f0bfa4fcf753c355f549c3c1e93bf5206a8 not found: ID does not exist" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.909581 4753 scope.go:117] "RemoveContainer" containerID="05d1e99596530f467ecdae51aec8db191ac045cd85e121fc9835e2f3688f6ae7" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.909778 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"05d1e99596530f467ecdae51aec8db191ac045cd85e121fc9835e2f3688f6ae7"} err="failed to get container status \"05d1e99596530f467ecdae51aec8db191ac045cd85e121fc9835e2f3688f6ae7\": rpc error: code = NotFound desc = could not find container \"05d1e99596530f467ecdae51aec8db191ac045cd85e121fc9835e2f3688f6ae7\": container with ID starting with 05d1e99596530f467ecdae51aec8db191ac045cd85e121fc9835e2f3688f6ae7 not found: ID does not exist" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.909792 4753 scope.go:117] "RemoveContainer" containerID="87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.910029 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c"} err="failed to get container status \"87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\": rpc error: code = NotFound desc = could not find container \"87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c\": container with ID starting with 87808d5f75236278e7cc6e30b1627d2a33b1a18116ada8fe7ef54fa391352d7c not found: ID does not exist" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.910044 4753 scope.go:117] "RemoveContainer" containerID="9ee2d75fd4d36ec3e097a8aa1ca55cef7754f5a5ed88d149bcf63e90d0fbf2c9" Jan 29 12:19:31 crc kubenswrapper[4753]: I0129 12:19:31.910278 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9ee2d75fd4d36ec3e097a8aa1ca55cef7754f5a5ed88d149bcf63e90d0fbf2c9"} err="failed to get container status \"9ee2d75fd4d36ec3e097a8aa1ca55cef7754f5a5ed88d149bcf63e90d0fbf2c9\": rpc error: code = NotFound desc = could not find container \"9ee2d75fd4d36ec3e097a8aa1ca55cef7754f5a5ed88d149bcf63e90d0fbf2c9\": container with ID starting with 9ee2d75fd4d36ec3e097a8aa1ca55cef7754f5a5ed88d149bcf63e90d0fbf2c9 not found: ID does not exist" Jan 29 12:19:32 crc kubenswrapper[4753]: I0129 12:19:32.477442 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rnbz9_b372210b-6e1b-4a80-b379-7c1d570712f3/kube-multus/1.log" Jan 29 12:19:32 crc kubenswrapper[4753]: I0129 
Jan 29 12:19:32 crc kubenswrapper[4753]: I0129 12:19:32.479894 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rnbz9_b372210b-6e1b-4a80-b379-7c1d570712f3/kube-multus/0.log"
Jan 29 12:19:32 crc kubenswrapper[4753]: I0129 12:19:32.480177 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-rnbz9" event={"ID":"b372210b-6e1b-4a80-b379-7c1d570712f3","Type":"ContainerStarted","Data":"cafee5550098894ec9f61260a7ac2958a62547fca62a9e92e5f26a961d816273"}
Jan 29 12:19:32 crc kubenswrapper[4753]: I0129 12:19:32.484115 4753 generic.go:334] "Generic (PLEG): container finished" podID="619dbf8f-19c9-4a31-ad2a-8a8a827a9087" containerID="edadebee2b174e3546a71e055eb88386547bee858ee98808dfe04c5fd65480b6" exitCode=0
Jan 29 12:19:32 crc kubenswrapper[4753]: I0129 12:19:32.484175 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" event={"ID":"619dbf8f-19c9-4a31-ad2a-8a8a827a9087","Type":"ContainerDied","Data":"edadebee2b174e3546a71e055eb88386547bee858ee98808dfe04c5fd65480b6"}
Jan 29 12:19:32 crc kubenswrapper[4753]: I0129 12:19:32.484199 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" event={"ID":"619dbf8f-19c9-4a31-ad2a-8a8a827a9087","Type":"ContainerStarted","Data":"109be103e211a4f3e092ce2399908fdeed5f838fec013606cf3769a127840858"}
Jan 29 12:19:33 crc kubenswrapper[4753]: I0129 12:19:33.502514 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" event={"ID":"619dbf8f-19c9-4a31-ad2a-8a8a827a9087","Type":"ContainerStarted","Data":"43d19dab65a1623137c9d5c5159964d86ec4609239aedfd3f03a8c4c11f89e0c"}
Jan 29 12:19:33 crc kubenswrapper[4753]: I0129 12:19:33.503090 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" event={"ID":"619dbf8f-19c9-4a31-ad2a-8a8a827a9087","Type":"ContainerStarted","Data":"3d5c785cb6e2c02f376f759129e7df2967d54d6f75275b5146f6e5f5b20ec9eb"}
Jan 29 12:19:33 crc kubenswrapper[4753]: I0129 12:19:33.503103 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" event={"ID":"619dbf8f-19c9-4a31-ad2a-8a8a827a9087","Type":"ContainerStarted","Data":"7cdaf0132fc593d89e5dc5e1305bd1e5a23e5d5e69e1eb5a37d33c2739757991"}
Jan 29 12:19:33 crc kubenswrapper[4753]: I0129 12:19:33.503116 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" event={"ID":"619dbf8f-19c9-4a31-ad2a-8a8a827a9087","Type":"ContainerStarted","Data":"f44129084a237994fb9b0cf4a3723139f8b452b0aebe310ea014c76d402b18a4"}
Jan 29 12:19:33 crc kubenswrapper[4753]: I0129 12:19:33.503126 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" event={"ID":"619dbf8f-19c9-4a31-ad2a-8a8a827a9087","Type":"ContainerStarted","Data":"be1aafb85a15714193e8c33b4abe7d30c6bb463b566460c9a342bea82faa2165"}
Jan 29 12:19:33 crc kubenswrapper[4753]: I0129 12:19:33.503134 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" event={"ID":"619dbf8f-19c9-4a31-ad2a-8a8a827a9087","Type":"ContainerStarted","Data":"056c6db8f380cb50dca5ab5727913bc509be1aa19d4a0ca89ac2b2928e6927f1"}
event={"ID":"619dbf8f-19c9-4a31-ad2a-8a8a827a9087","Type":"ContainerStarted","Data":"445ac42daf530afebd9cec01370e066b5cb9c718035263020d039cbced74e406"} Jan 29 12:19:38 crc kubenswrapper[4753]: I0129 12:19:38.550825 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" event={"ID":"619dbf8f-19c9-4a31-ad2a-8a8a827a9087","Type":"ContainerStarted","Data":"037f7a105e508d90d0ea9b16204ef4cedd6d1133e7635a869193a3edbb718591"} Jan 29 12:19:38 crc kubenswrapper[4753]: I0129 12:19:38.551723 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:38 crc kubenswrapper[4753]: I0129 12:19:38.551962 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:38 crc kubenswrapper[4753]: I0129 12:19:38.551991 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:38 crc kubenswrapper[4753]: I0129 12:19:38.581643 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:38 crc kubenswrapper[4753]: I0129 12:19:38.583340 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:19:38 crc kubenswrapper[4753]: I0129 12:19:38.594890 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" podStartSLOduration=8.594844463 podStartE2EDuration="8.594844463s" podCreationTimestamp="2026-01-29 12:19:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:19:38.594552535 +0000 UTC m=+792.846634010" watchObservedRunningTime="2026-01-29 12:19:38.594844463 +0000 UTC m=+792.846925928" Jan 29 12:19:55 crc kubenswrapper[4753]: I0129 12:19:55.930763 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7b4vm"] Jan 29 12:19:55 crc kubenswrapper[4753]: I0129 12:19:55.933515 4753 util.go:30] "No sandbox for pod can be found. 
Jan 29 12:19:55 crc kubenswrapper[4753]: I0129 12:19:55.933515 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7b4vm"
Jan 29 12:19:55 crc kubenswrapper[4753]: I0129 12:19:55.936497 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc"
Jan 29 12:19:55 crc kubenswrapper[4753]: I0129 12:19:55.937869 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7b4vm"]
Jan 29 12:19:56 crc kubenswrapper[4753]: I0129 12:19:56.099656 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5sn2c\" (UniqueName: \"kubernetes.io/projected/01efdd65-431c-4f40-b00f-00cc75ed4682-kube-api-access-5sn2c\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7b4vm\" (UID: \"01efdd65-431c-4f40-b00f-00cc75ed4682\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7b4vm"
Jan 29 12:19:56 crc kubenswrapper[4753]: I0129 12:19:56.099964 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/01efdd65-431c-4f40-b00f-00cc75ed4682-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7b4vm\" (UID: \"01efdd65-431c-4f40-b00f-00cc75ed4682\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7b4vm"
Jan 29 12:19:56 crc kubenswrapper[4753]: I0129 12:19:56.100243 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/01efdd65-431c-4f40-b00f-00cc75ed4682-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7b4vm\" (UID: \"01efdd65-431c-4f40-b00f-00cc75ed4682\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7b4vm"
Jan 29 12:19:56 crc kubenswrapper[4753]: I0129 12:19:56.201646 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/01efdd65-431c-4f40-b00f-00cc75ed4682-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7b4vm\" (UID: \"01efdd65-431c-4f40-b00f-00cc75ed4682\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7b4vm"
Jan 29 12:19:56 crc kubenswrapper[4753]: I0129 12:19:56.201990 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/01efdd65-431c-4f40-b00f-00cc75ed4682-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7b4vm\" (UID: \"01efdd65-431c-4f40-b00f-00cc75ed4682\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7b4vm"
Jan 29 12:19:56 crc kubenswrapper[4753]: I0129 12:19:56.202054 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5sn2c\" (UniqueName: \"kubernetes.io/projected/01efdd65-431c-4f40-b00f-00cc75ed4682-kube-api-access-5sn2c\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7b4vm\" (UID: \"01efdd65-431c-4f40-b00f-00cc75ed4682\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7b4vm"
Jan 29 12:19:56 crc kubenswrapper[4753]: I0129 12:19:56.202384 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName:
\"kubernetes.io/empty-dir/01efdd65-431c-4f40-b00f-00cc75ed4682-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7b4vm\" (UID: \"01efdd65-431c-4f40-b00f-00cc75ed4682\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7b4vm" Jan 29 12:19:56 crc kubenswrapper[4753]: I0129 12:19:56.202613 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/01efdd65-431c-4f40-b00f-00cc75ed4682-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7b4vm\" (UID: \"01efdd65-431c-4f40-b00f-00cc75ed4682\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7b4vm" Jan 29 12:19:56 crc kubenswrapper[4753]: I0129 12:19:56.221243 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5sn2c\" (UniqueName: \"kubernetes.io/projected/01efdd65-431c-4f40-b00f-00cc75ed4682-kube-api-access-5sn2c\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7b4vm\" (UID: \"01efdd65-431c-4f40-b00f-00cc75ed4682\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7b4vm" Jan 29 12:19:56 crc kubenswrapper[4753]: I0129 12:19:56.254563 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7b4vm" Jan 29 12:19:56 crc kubenswrapper[4753]: I0129 12:19:56.459884 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7b4vm"] Jan 29 12:19:56 crc kubenswrapper[4753]: W0129 12:19:56.468702 4753 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod01efdd65_431c_4f40_b00f_00cc75ed4682.slice/crio-547114d445c1fa050f560ce7f0fdbf0867eed8903b14c077378e2d3cf8ee2b25 WatchSource:0}: Error finding container 547114d445c1fa050f560ce7f0fdbf0867eed8903b14c077378e2d3cf8ee2b25: Status 404 returned error can't find the container with id 547114d445c1fa050f560ce7f0fdbf0867eed8903b14c077378e2d3cf8ee2b25 Jan 29 12:19:56 crc kubenswrapper[4753]: I0129 12:19:56.659521 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7b4vm" event={"ID":"01efdd65-431c-4f40-b00f-00cc75ed4682","Type":"ContainerStarted","Data":"538f42641ddbc0a6d9ddda8178e03c0dd6e4dec215f1fb4dcc313cee013cd345"} Jan 29 12:19:56 crc kubenswrapper[4753]: I0129 12:19:56.659581 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7b4vm" event={"ID":"01efdd65-431c-4f40-b00f-00cc75ed4682","Type":"ContainerStarted","Data":"547114d445c1fa050f560ce7f0fdbf0867eed8903b14c077378e2d3cf8ee2b25"} Jan 29 12:19:57 crc kubenswrapper[4753]: I0129 12:19:57.666142 4753 generic.go:334] "Generic (PLEG): container finished" podID="01efdd65-431c-4f40-b00f-00cc75ed4682" containerID="538f42641ddbc0a6d9ddda8178e03c0dd6e4dec215f1fb4dcc313cee013cd345" exitCode=0 Jan 29 12:19:57 crc kubenswrapper[4753]: I0129 12:19:57.666189 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7b4vm" event={"ID":"01efdd65-431c-4f40-b00f-00cc75ed4682","Type":"ContainerDied","Data":"538f42641ddbc0a6d9ddda8178e03c0dd6e4dec215f1fb4dcc313cee013cd345"} Jan 29 12:19:57 crc 
kubenswrapper[4753]: I0129 12:19:57.668609 4753 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 29 12:19:58 crc kubenswrapper[4753]: I0129 12:19:58.207034 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-qk6cr"] Jan 29 12:19:58 crc kubenswrapper[4753]: I0129 12:19:58.209171 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-qk6cr" Jan 29 12:19:58 crc kubenswrapper[4753]: I0129 12:19:58.232560 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-qk6cr"] Jan 29 12:19:58 crc kubenswrapper[4753]: I0129 12:19:58.330393 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9-catalog-content\") pod \"redhat-operators-qk6cr\" (UID: \"9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9\") " pod="openshift-marketplace/redhat-operators-qk6cr" Jan 29 12:19:58 crc kubenswrapper[4753]: I0129 12:19:58.330439 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8wwg9\" (UniqueName: \"kubernetes.io/projected/9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9-kube-api-access-8wwg9\") pod \"redhat-operators-qk6cr\" (UID: \"9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9\") " pod="openshift-marketplace/redhat-operators-qk6cr" Jan 29 12:19:58 crc kubenswrapper[4753]: I0129 12:19:58.330480 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9-utilities\") pod \"redhat-operators-qk6cr\" (UID: \"9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9\") " pod="openshift-marketplace/redhat-operators-qk6cr" Jan 29 12:19:58 crc kubenswrapper[4753]: I0129 12:19:58.431760 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9-catalog-content\") pod \"redhat-operators-qk6cr\" (UID: \"9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9\") " pod="openshift-marketplace/redhat-operators-qk6cr" Jan 29 12:19:58 crc kubenswrapper[4753]: I0129 12:19:58.432078 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8wwg9\" (UniqueName: \"kubernetes.io/projected/9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9-kube-api-access-8wwg9\") pod \"redhat-operators-qk6cr\" (UID: \"9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9\") " pod="openshift-marketplace/redhat-operators-qk6cr" Jan 29 12:19:58 crc kubenswrapper[4753]: I0129 12:19:58.432243 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9-utilities\") pod \"redhat-operators-qk6cr\" (UID: \"9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9\") " pod="openshift-marketplace/redhat-operators-qk6cr" Jan 29 12:19:58 crc kubenswrapper[4753]: I0129 12:19:58.432352 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9-catalog-content\") pod \"redhat-operators-qk6cr\" (UID: \"9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9\") " pod="openshift-marketplace/redhat-operators-qk6cr" Jan 29 12:19:58 crc kubenswrapper[4753]: I0129 12:19:58.432597 4753 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9-utilities\") pod \"redhat-operators-qk6cr\" (UID: \"9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9\") " pod="openshift-marketplace/redhat-operators-qk6cr" Jan 29 12:19:58 crc kubenswrapper[4753]: I0129 12:19:58.452743 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8wwg9\" (UniqueName: \"kubernetes.io/projected/9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9-kube-api-access-8wwg9\") pod \"redhat-operators-qk6cr\" (UID: \"9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9\") " pod="openshift-marketplace/redhat-operators-qk6cr" Jan 29 12:19:58 crc kubenswrapper[4753]: I0129 12:19:58.529454 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-qk6cr" Jan 29 12:19:58 crc kubenswrapper[4753]: I0129 12:19:58.756992 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-qk6cr"] Jan 29 12:19:59 crc kubenswrapper[4753]: I0129 12:19:59.252529 4753 patch_prober.go:28] interesting pod/machine-config-daemon-7c24x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 12:19:59 crc kubenswrapper[4753]: I0129 12:19:59.252613 4753 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 12:19:59 crc kubenswrapper[4753]: I0129 12:19:59.689027 4753 generic.go:334] "Generic (PLEG): container finished" podID="01efdd65-431c-4f40-b00f-00cc75ed4682" containerID="2811b3af2c066904b3485fb36d1aa56c591c6149c7919259221a966b60c02021" exitCode=0 Jan 29 12:19:59 crc kubenswrapper[4753]: I0129 12:19:59.689097 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7b4vm" event={"ID":"01efdd65-431c-4f40-b00f-00cc75ed4682","Type":"ContainerDied","Data":"2811b3af2c066904b3485fb36d1aa56c591c6149c7919259221a966b60c02021"} Jan 29 12:19:59 crc kubenswrapper[4753]: I0129 12:19:59.691284 4753 generic.go:334] "Generic (PLEG): container finished" podID="9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9" containerID="c06a082f753d549f33df88bd0218665d551a797178aa2b974d7e21c9f656420f" exitCode=0 Jan 29 12:19:59 crc kubenswrapper[4753]: I0129 12:19:59.691315 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qk6cr" event={"ID":"9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9","Type":"ContainerDied","Data":"c06a082f753d549f33df88bd0218665d551a797178aa2b974d7e21c9f656420f"} Jan 29 12:19:59 crc kubenswrapper[4753]: I0129 12:19:59.691351 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qk6cr" event={"ID":"9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9","Type":"ContainerStarted","Data":"91b58132b13b98d7eedd18e67fa3559a6dd212e0410f2f30f0366813eb7f5e2d"} Jan 29 12:20:00 crc kubenswrapper[4753]: I0129 12:20:00.698980 4753 generic.go:334] "Generic (PLEG): container finished" podID="01efdd65-431c-4f40-b00f-00cc75ed4682" containerID="85ad5be9f055d6f555557e58e5d4944aaa633b74e0f35084cd013cf7f6be921d" exitCode=0 Jan 29 
12:20:00 crc kubenswrapper[4753]: I0129 12:20:00.699033 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7b4vm" event={"ID":"01efdd65-431c-4f40-b00f-00cc75ed4682","Type":"ContainerDied","Data":"85ad5be9f055d6f555557e58e5d4944aaa633b74e0f35084cd013cf7f6be921d"} Jan 29 12:20:01 crc kubenswrapper[4753]: I0129 12:20:01.508813 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-plnc9" Jan 29 12:20:01 crc kubenswrapper[4753]: I0129 12:20:01.727049 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qk6cr" event={"ID":"9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9","Type":"ContainerStarted","Data":"eef34bdccd7f3976b3a8a08045556be6a3b316f4ea4f011450712918609bc80c"} Jan 29 12:20:01 crc kubenswrapper[4753]: I0129 12:20:01.967550 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7b4vm" Jan 29 12:20:02 crc kubenswrapper[4753]: I0129 12:20:02.125379 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/01efdd65-431c-4f40-b00f-00cc75ed4682-bundle\") pod \"01efdd65-431c-4f40-b00f-00cc75ed4682\" (UID: \"01efdd65-431c-4f40-b00f-00cc75ed4682\") " Jan 29 12:20:02 crc kubenswrapper[4753]: I0129 12:20:02.125498 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/01efdd65-431c-4f40-b00f-00cc75ed4682-util\") pod \"01efdd65-431c-4f40-b00f-00cc75ed4682\" (UID: \"01efdd65-431c-4f40-b00f-00cc75ed4682\") " Jan 29 12:20:02 crc kubenswrapper[4753]: I0129 12:20:02.125586 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5sn2c\" (UniqueName: \"kubernetes.io/projected/01efdd65-431c-4f40-b00f-00cc75ed4682-kube-api-access-5sn2c\") pod \"01efdd65-431c-4f40-b00f-00cc75ed4682\" (UID: \"01efdd65-431c-4f40-b00f-00cc75ed4682\") " Jan 29 12:20:02 crc kubenswrapper[4753]: I0129 12:20:02.126700 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/01efdd65-431c-4f40-b00f-00cc75ed4682-bundle" (OuterVolumeSpecName: "bundle") pod "01efdd65-431c-4f40-b00f-00cc75ed4682" (UID: "01efdd65-431c-4f40-b00f-00cc75ed4682"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:20:02 crc kubenswrapper[4753]: I0129 12:20:02.147630 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01efdd65-431c-4f40-b00f-00cc75ed4682-kube-api-access-5sn2c" (OuterVolumeSpecName: "kube-api-access-5sn2c") pod "01efdd65-431c-4f40-b00f-00cc75ed4682" (UID: "01efdd65-431c-4f40-b00f-00cc75ed4682"). InnerVolumeSpecName "kube-api-access-5sn2c". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:20:02 crc kubenswrapper[4753]: I0129 12:20:02.187113 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/01efdd65-431c-4f40-b00f-00cc75ed4682-util" (OuterVolumeSpecName: "util") pod "01efdd65-431c-4f40-b00f-00cc75ed4682" (UID: "01efdd65-431c-4f40-b00f-00cc75ed4682"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:20:02 crc kubenswrapper[4753]: I0129 12:20:02.227089 4753 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/01efdd65-431c-4f40-b00f-00cc75ed4682-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:20:02 crc kubenswrapper[4753]: I0129 12:20:02.227150 4753 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/01efdd65-431c-4f40-b00f-00cc75ed4682-util\") on node \"crc\" DevicePath \"\"" Jan 29 12:20:02 crc kubenswrapper[4753]: I0129 12:20:02.227161 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5sn2c\" (UniqueName: \"kubernetes.io/projected/01efdd65-431c-4f40-b00f-00cc75ed4682-kube-api-access-5sn2c\") on node \"crc\" DevicePath \"\"" Jan 29 12:20:02 crc kubenswrapper[4753]: I0129 12:20:02.733649 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7b4vm" event={"ID":"01efdd65-431c-4f40-b00f-00cc75ed4682","Type":"ContainerDied","Data":"547114d445c1fa050f560ce7f0fdbf0867eed8903b14c077378e2d3cf8ee2b25"} Jan 29 12:20:02 crc kubenswrapper[4753]: I0129 12:20:02.733702 4753 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="547114d445c1fa050f560ce7f0fdbf0867eed8903b14c077378e2d3cf8ee2b25" Jan 29 12:20:02 crc kubenswrapper[4753]: I0129 12:20:02.733836 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7b4vm" Jan 29 12:20:02 crc kubenswrapper[4753]: I0129 12:20:02.736048 4753 generic.go:334] "Generic (PLEG): container finished" podID="9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9" containerID="eef34bdccd7f3976b3a8a08045556be6a3b316f4ea4f011450712918609bc80c" exitCode=0 Jan 29 12:20:02 crc kubenswrapper[4753]: I0129 12:20:02.736082 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qk6cr" event={"ID":"9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9","Type":"ContainerDied","Data":"eef34bdccd7f3976b3a8a08045556be6a3b316f4ea4f011450712918609bc80c"} Jan 29 12:20:03 crc kubenswrapper[4753]: I0129 12:20:03.743928 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qk6cr" event={"ID":"9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9","Type":"ContainerStarted","Data":"fc427aad5bdb1df0a7f89071285fdc30dc50cdc63f061669c9b4627e6e687d01"} Jan 29 12:20:03 crc kubenswrapper[4753]: I0129 12:20:03.764722 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-qk6cr" podStartSLOduration=2.158886286 podStartE2EDuration="5.764703989s" podCreationTimestamp="2026-01-29 12:19:58 +0000 UTC" firstStartedPulling="2026-01-29 12:19:59.6933721 +0000 UTC m=+813.945453555" lastFinishedPulling="2026-01-29 12:20:03.299189803 +0000 UTC m=+817.551271258" observedRunningTime="2026-01-29 12:20:03.761250711 +0000 UTC m=+818.013332186" watchObservedRunningTime="2026-01-29 12:20:03.764703989 +0000 UTC m=+818.016785444" Jan 29 12:20:08 crc kubenswrapper[4753]: I0129 12:20:08.529887 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-qk6cr" Jan 29 12:20:08 crc kubenswrapper[4753]: I0129 12:20:08.531450 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-qk6cr" Jan 29 
Jan 29 12:20:09 crc kubenswrapper[4753]: I0129 12:20:09.847120 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-qk6cr" podUID="9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9" containerName="registry-server" probeResult="failure" output=<
Jan 29 12:20:09 crc kubenswrapper[4753]: timeout: failed to connect service ":50051" within 1s
Jan 29 12:20:09 crc kubenswrapper[4753]: >
Jan 29 12:20:12 crc kubenswrapper[4753]: I0129 12:20:12.707637 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-68df96bfd5-7k5hh"]
Jan 29 12:20:12 crc kubenswrapper[4753]: E0129 12:20:12.708432 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="01efdd65-431c-4f40-b00f-00cc75ed4682" containerName="pull"
Jan 29 12:20:12 crc kubenswrapper[4753]: I0129 12:20:12.708467 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="01efdd65-431c-4f40-b00f-00cc75ed4682" containerName="pull"
Jan 29 12:20:12 crc kubenswrapper[4753]: E0129 12:20:12.708499 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="01efdd65-431c-4f40-b00f-00cc75ed4682" containerName="util"
Jan 29 12:20:12 crc kubenswrapper[4753]: I0129 12:20:12.708512 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="01efdd65-431c-4f40-b00f-00cc75ed4682" containerName="util"
Jan 29 12:20:12 crc kubenswrapper[4753]: E0129 12:20:12.708532 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="01efdd65-431c-4f40-b00f-00cc75ed4682" containerName="extract"
Jan 29 12:20:12 crc kubenswrapper[4753]: I0129 12:20:12.708540 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="01efdd65-431c-4f40-b00f-00cc75ed4682" containerName="extract"
Jan 29 12:20:12 crc kubenswrapper[4753]: I0129 12:20:12.708721 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="01efdd65-431c-4f40-b00f-00cc75ed4682" containerName="extract"
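Just above, the startup probe for redhat-operators-qk6cr fails because nothing answers on :50051 yet (the registry-server takes a while to load its catalog before the probe later reports "started"), and the paired cpu_manager.go:410 and state_mem.go:107 entries show the CPU manager dropping stale per-container CPUSet assignments for the finished bundle pod's pull, util, and extract containers, with memory_manager.go:354 doing the same for memory state; the E-prefixed lines are part of that routine cleanup rather than new failures. The kubelet persists CPU assignments as a JSON checkpoint, conventionally /var/lib/kubelet/cpu_manager_state. The sketch below decodes it; the field names (policyName, defaultCpuSet, entries, checksum) follow the upstream checkpoint layout but should be treated as assumptions that can shift between kubelet versions.

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// Assumed shape of the kubelet CPU manager checkpoint file.
type cpuManagerState struct {
	PolicyName    string                       `json:"policyName"`
	DefaultCPUSet string                       `json:"defaultCpuSet"`
	Entries       map[string]map[string]string `json:"entries"` // podUID -> container -> cpuset
	Checksum      uint64                       `json:"checksum"`
}

func main() {
	data, err := os.ReadFile("/var/lib/kubelet/cpu_manager_state")
	if err != nil {
		fmt.Fprintln(os.Stderr, "read:", err)
		os.Exit(1)
	}
	var st cpuManagerState
	if err := json.Unmarshal(data, &st); err != nil {
		fmt.Fprintln(os.Stderr, "decode:", err)
		os.Exit(1)
	}
	fmt.Printf("policy=%s defaultCpuSet=%s\n", st.PolicyName, st.DefaultCPUSet)
	for pod, ctrs := range st.Entries {
		for name, set := range ctrs {
			fmt.Printf("  %s/%s -> %s\n", pod, name, set)
		}
	}
}

With the default "none" policy the entries map is typically empty and only defaultCpuSet is populated; per-container rows appear under the static policy.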
Jan 29 12:20:12 crc kubenswrapper[4753]: I0129 12:20:12.709541 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-68df96bfd5-7k5hh"
Jan 29 12:20:12 crc kubenswrapper[4753]: I0129 12:20:12.720439 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt"
Jan 29 12:20:12 crc kubenswrapper[4753]: I0129 12:20:12.720784 4753 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert"
Jan 29 12:20:12 crc kubenswrapper[4753]: I0129 12:20:12.721062 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt"
Jan 29 12:20:12 crc kubenswrapper[4753]: I0129 12:20:12.721367 4753 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert"
Jan 29 12:20:12 crc kubenswrapper[4753]: I0129 12:20:12.722100 4753 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-9gm4v"
Jan 29 12:20:12 crc kubenswrapper[4753]: I0129 12:20:12.739865 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-68df96bfd5-7k5hh"]
Jan 29 12:20:13 crc kubenswrapper[4753]: I0129 12:20:13.143683 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/3695d573-e0e3-469a-b536-6ed32ade8e82-apiservice-cert\") pod \"metallb-operator-controller-manager-68df96bfd5-7k5hh\" (UID: \"3695d573-e0e3-469a-b536-6ed32ade8e82\") " pod="metallb-system/metallb-operator-controller-manager-68df96bfd5-7k5hh"
Jan 29 12:20:13 crc kubenswrapper[4753]: I0129 12:20:13.144193 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gtqdl\" (UniqueName: \"kubernetes.io/projected/3695d573-e0e3-469a-b536-6ed32ade8e82-kube-api-access-gtqdl\") pod \"metallb-operator-controller-manager-68df96bfd5-7k5hh\" (UID: \"3695d573-e0e3-469a-b536-6ed32ade8e82\") " pod="metallb-system/metallb-operator-controller-manager-68df96bfd5-7k5hh"
Jan 29 12:20:13 crc kubenswrapper[4753]: I0129 12:20:13.144327 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/3695d573-e0e3-469a-b536-6ed32ade8e82-webhook-cert\") pod \"metallb-operator-controller-manager-68df96bfd5-7k5hh\" (UID: \"3695d573-e0e3-469a-b536-6ed32ade8e82\") " pod="metallb-system/metallb-operator-controller-manager-68df96bfd5-7k5hh"
Jan 29 12:20:13 crc kubenswrapper[4753]: I0129 12:20:13.245320 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/3695d573-e0e3-469a-b536-6ed32ade8e82-apiservice-cert\") pod \"metallb-operator-controller-manager-68df96bfd5-7k5hh\" (UID: \"3695d573-e0e3-469a-b536-6ed32ade8e82\") " pod="metallb-system/metallb-operator-controller-manager-68df96bfd5-7k5hh"
Jan 29 12:20:13 crc kubenswrapper[4753]: I0129 12:20:13.245617 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gtqdl\" (UniqueName: \"kubernetes.io/projected/3695d573-e0e3-469a-b536-6ed32ade8e82-kube-api-access-gtqdl\") pod \"metallb-operator-controller-manager-68df96bfd5-7k5hh\" (UID: \"3695d573-e0e3-469a-b536-6ed32ade8e82\") " pod="metallb-system/metallb-operator-controller-manager-68df96bfd5-7k5hh"
Jan 29 12:20:13 crc kubenswrapper[4753]: I0129 12:20:13.245713
4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/3695d573-e0e3-469a-b536-6ed32ade8e82-webhook-cert\") pod \"metallb-operator-controller-manager-68df96bfd5-7k5hh\" (UID: \"3695d573-e0e3-469a-b536-6ed32ade8e82\") " pod="metallb-system/metallb-operator-controller-manager-68df96bfd5-7k5hh" Jan 29 12:20:13 crc kubenswrapper[4753]: I0129 12:20:13.252187 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/3695d573-e0e3-469a-b536-6ed32ade8e82-apiservice-cert\") pod \"metallb-operator-controller-manager-68df96bfd5-7k5hh\" (UID: \"3695d573-e0e3-469a-b536-6ed32ade8e82\") " pod="metallb-system/metallb-operator-controller-manager-68df96bfd5-7k5hh" Jan 29 12:20:13 crc kubenswrapper[4753]: I0129 12:20:13.259063 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/3695d573-e0e3-469a-b536-6ed32ade8e82-webhook-cert\") pod \"metallb-operator-controller-manager-68df96bfd5-7k5hh\" (UID: \"3695d573-e0e3-469a-b536-6ed32ade8e82\") " pod="metallb-system/metallb-operator-controller-manager-68df96bfd5-7k5hh" Jan 29 12:20:13 crc kubenswrapper[4753]: I0129 12:20:13.265443 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gtqdl\" (UniqueName: \"kubernetes.io/projected/3695d573-e0e3-469a-b536-6ed32ade8e82-kube-api-access-gtqdl\") pod \"metallb-operator-controller-manager-68df96bfd5-7k5hh\" (UID: \"3695d573-e0e3-469a-b536-6ed32ade8e82\") " pod="metallb-system/metallb-operator-controller-manager-68df96bfd5-7k5hh" Jan 29 12:20:13 crc kubenswrapper[4753]: I0129 12:20:13.620649 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-68df96bfd5-7k5hh" Jan 29 12:20:14 crc kubenswrapper[4753]: I0129 12:20:13.826102 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-6787488fdd-zv44d"] Jan 29 12:20:14 crc kubenswrapper[4753]: I0129 12:20:13.827135 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-6787488fdd-zv44d" Jan 29 12:20:14 crc kubenswrapper[4753]: I0129 12:20:14.200206 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g8txh\" (UniqueName: \"kubernetes.io/projected/f743a92e-c395-4123-b0ed-58237f1b1c8f-kube-api-access-g8txh\") pod \"metallb-operator-webhook-server-6787488fdd-zv44d\" (UID: \"f743a92e-c395-4123-b0ed-58237f1b1c8f\") " pod="metallb-system/metallb-operator-webhook-server-6787488fdd-zv44d" Jan 29 12:20:14 crc kubenswrapper[4753]: I0129 12:20:14.200509 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f743a92e-c395-4123-b0ed-58237f1b1c8f-apiservice-cert\") pod \"metallb-operator-webhook-server-6787488fdd-zv44d\" (UID: \"f743a92e-c395-4123-b0ed-58237f1b1c8f\") " pod="metallb-system/metallb-operator-webhook-server-6787488fdd-zv44d" Jan 29 12:20:14 crc kubenswrapper[4753]: I0129 12:20:14.200583 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f743a92e-c395-4123-b0ed-58237f1b1c8f-webhook-cert\") pod \"metallb-operator-webhook-server-6787488fdd-zv44d\" (UID: \"f743a92e-c395-4123-b0ed-58237f1b1c8f\") " pod="metallb-system/metallb-operator-webhook-server-6787488fdd-zv44d" Jan 29 12:20:14 crc kubenswrapper[4753]: I0129 12:20:14.226292 4753 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Jan 29 12:20:14 crc kubenswrapper[4753]: I0129 12:20:14.226444 4753 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Jan 29 12:20:14 crc kubenswrapper[4753]: I0129 12:20:14.226515 4753 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-wvbj9" Jan 29 12:20:14 crc kubenswrapper[4753]: I0129 12:20:14.253493 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-6787488fdd-zv44d"] Jan 29 12:20:14 crc kubenswrapper[4753]: I0129 12:20:14.303291 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f743a92e-c395-4123-b0ed-58237f1b1c8f-webhook-cert\") pod \"metallb-operator-webhook-server-6787488fdd-zv44d\" (UID: \"f743a92e-c395-4123-b0ed-58237f1b1c8f\") " pod="metallb-system/metallb-operator-webhook-server-6787488fdd-zv44d" Jan 29 12:20:14 crc kubenswrapper[4753]: I0129 12:20:14.303510 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g8txh\" (UniqueName: \"kubernetes.io/projected/f743a92e-c395-4123-b0ed-58237f1b1c8f-kube-api-access-g8txh\") pod \"metallb-operator-webhook-server-6787488fdd-zv44d\" (UID: \"f743a92e-c395-4123-b0ed-58237f1b1c8f\") " pod="metallb-system/metallb-operator-webhook-server-6787488fdd-zv44d" Jan 29 12:20:14 crc kubenswrapper[4753]: I0129 12:20:14.304331 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f743a92e-c395-4123-b0ed-58237f1b1c8f-apiservice-cert\") pod \"metallb-operator-webhook-server-6787488fdd-zv44d\" (UID: \"f743a92e-c395-4123-b0ed-58237f1b1c8f\") " pod="metallb-system/metallb-operator-webhook-server-6787488fdd-zv44d" Jan 29 12:20:14 crc kubenswrapper[4753]: I0129 
12:20:14.591294 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f743a92e-c395-4123-b0ed-58237f1b1c8f-apiservice-cert\") pod \"metallb-operator-webhook-server-6787488fdd-zv44d\" (UID: \"f743a92e-c395-4123-b0ed-58237f1b1c8f\") " pod="metallb-system/metallb-operator-webhook-server-6787488fdd-zv44d" Jan 29 12:20:14 crc kubenswrapper[4753]: I0129 12:20:14.597077 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f743a92e-c395-4123-b0ed-58237f1b1c8f-webhook-cert\") pod \"metallb-operator-webhook-server-6787488fdd-zv44d\" (UID: \"f743a92e-c395-4123-b0ed-58237f1b1c8f\") " pod="metallb-system/metallb-operator-webhook-server-6787488fdd-zv44d" Jan 29 12:20:14 crc kubenswrapper[4753]: I0129 12:20:14.599944 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g8txh\" (UniqueName: \"kubernetes.io/projected/f743a92e-c395-4123-b0ed-58237f1b1c8f-kube-api-access-g8txh\") pod \"metallb-operator-webhook-server-6787488fdd-zv44d\" (UID: \"f743a92e-c395-4123-b0ed-58237f1b1c8f\") " pod="metallb-system/metallb-operator-webhook-server-6787488fdd-zv44d" Jan 29 12:20:14 crc kubenswrapper[4753]: I0129 12:20:14.684507 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-68df96bfd5-7k5hh"] Jan 29 12:20:14 crc kubenswrapper[4753]: W0129 12:20:14.695401 4753 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3695d573_e0e3_469a_b536_6ed32ade8e82.slice/crio-5c3c1968c8af2f18ecbd09b61bfd49d38441deb746bca7998abea25d0a5e755c WatchSource:0}: Error finding container 5c3c1968c8af2f18ecbd09b61bfd49d38441deb746bca7998abea25d0a5e755c: Status 404 returned error can't find the container with id 5c3c1968c8af2f18ecbd09b61bfd49d38441deb746bca7998abea25d0a5e755c Jan 29 12:20:14 crc kubenswrapper[4753]: I0129 12:20:14.818303 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-6787488fdd-zv44d" Jan 29 12:20:15 crc kubenswrapper[4753]: I0129 12:20:15.220889 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-68df96bfd5-7k5hh" event={"ID":"3695d573-e0e3-469a-b536-6ed32ade8e82","Type":"ContainerStarted","Data":"5c3c1968c8af2f18ecbd09b61bfd49d38441deb746bca7998abea25d0a5e755c"} Jan 29 12:20:15 crc kubenswrapper[4753]: I0129 12:20:15.684939 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-6787488fdd-zv44d"] Jan 29 12:20:16 crc kubenswrapper[4753]: I0129 12:20:16.228076 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-6787488fdd-zv44d" event={"ID":"f743a92e-c395-4123-b0ed-58237f1b1c8f","Type":"ContainerStarted","Data":"a42505a29bdab41c139e9bfa3d4e6fb888d77641bf874503675b93e42da7f17a"} Jan 29 12:20:18 crc kubenswrapper[4753]: I0129 12:20:18.575307 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-qk6cr" Jan 29 12:20:18 crc kubenswrapper[4753]: I0129 12:20:18.635108 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-qk6cr" Jan 29 12:20:20 crc kubenswrapper[4753]: I0129 12:20:20.995779 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-qk6cr"] Jan 29 12:20:20 crc kubenswrapper[4753]: I0129 12:20:20.996377 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-qk6cr" podUID="9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9" containerName="registry-server" containerID="cri-o://fc427aad5bdb1df0a7f89071285fdc30dc50cdc63f061669c9b4627e6e687d01" gracePeriod=2 Jan 29 12:20:21 crc kubenswrapper[4753]: I0129 12:20:21.263313 4753 generic.go:334] "Generic (PLEG): container finished" podID="9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9" containerID="fc427aad5bdb1df0a7f89071285fdc30dc50cdc63f061669c9b4627e6e687d01" exitCode=0 Jan 29 12:20:21 crc kubenswrapper[4753]: I0129 12:20:21.263717 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qk6cr" event={"ID":"9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9","Type":"ContainerDied","Data":"fc427aad5bdb1df0a7f89071285fdc30dc50cdc63f061669c9b4627e6e687d01"} Jan 29 12:20:21 crc kubenswrapper[4753]: I0129 12:20:21.265940 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-68df96bfd5-7k5hh" event={"ID":"3695d573-e0e3-469a-b536-6ed32ade8e82","Type":"ContainerStarted","Data":"e05987456fbbd962d999e56e72f39dfdd047b2daccd587f86788fc9922b1241d"} Jan 29 12:20:21 crc kubenswrapper[4753]: I0129 12:20:21.267054 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-68df96bfd5-7k5hh" Jan 29 12:20:21 crc kubenswrapper[4753]: I0129 12:20:21.291942 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-68df96bfd5-7k5hh" podStartSLOduration=3.778145038 podStartE2EDuration="9.29189705s" podCreationTimestamp="2026-01-29 12:20:12 +0000 UTC" firstStartedPulling="2026-01-29 12:20:14.699353751 +0000 UTC m=+828.951435206" lastFinishedPulling="2026-01-29 12:20:20.213105753 +0000 UTC m=+834.465187218" observedRunningTime="2026-01-29 12:20:21.288000679 +0000 UTC 
m=+835.540082144" watchObservedRunningTime="2026-01-29 12:20:21.29189705 +0000 UTC m=+835.543978505" Jan 29 12:20:22 crc kubenswrapper[4753]: I0129 12:20:22.082773 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-qk6cr" Jan 29 12:20:22 crc kubenswrapper[4753]: I0129 12:20:22.171470 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8wwg9\" (UniqueName: \"kubernetes.io/projected/9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9-kube-api-access-8wwg9\") pod \"9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9\" (UID: \"9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9\") " Jan 29 12:20:22 crc kubenswrapper[4753]: I0129 12:20:22.171625 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9-utilities\") pod \"9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9\" (UID: \"9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9\") " Jan 29 12:20:22 crc kubenswrapper[4753]: I0129 12:20:22.171666 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9-catalog-content\") pod \"9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9\" (UID: \"9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9\") " Jan 29 12:20:22 crc kubenswrapper[4753]: I0129 12:20:22.172722 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9-utilities" (OuterVolumeSpecName: "utilities") pod "9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9" (UID: "9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:20:22 crc kubenswrapper[4753]: I0129 12:20:22.176963 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9-kube-api-access-8wwg9" (OuterVolumeSpecName: "kube-api-access-8wwg9") pod "9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9" (UID: "9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9"). InnerVolumeSpecName "kube-api-access-8wwg9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:20:22 crc kubenswrapper[4753]: I0129 12:20:22.273671 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8wwg9\" (UniqueName: \"kubernetes.io/projected/9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9-kube-api-access-8wwg9\") on node \"crc\" DevicePath \"\"" Jan 29 12:20:22 crc kubenswrapper[4753]: I0129 12:20:22.273733 4753 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 12:20:22 crc kubenswrapper[4753]: I0129 12:20:22.276898 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qk6cr" event={"ID":"9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9","Type":"ContainerDied","Data":"91b58132b13b98d7eedd18e67fa3559a6dd212e0410f2f30f0366813eb7f5e2d"} Jan 29 12:20:22 crc kubenswrapper[4753]: I0129 12:20:22.276959 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-qk6cr" Jan 29 12:20:22 crc kubenswrapper[4753]: I0129 12:20:22.277024 4753 scope.go:117] "RemoveContainer" containerID="fc427aad5bdb1df0a7f89071285fdc30dc50cdc63f061669c9b4627e6e687d01" Jan 29 12:20:22 crc kubenswrapper[4753]: I0129 12:20:22.281318 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-6787488fdd-zv44d" event={"ID":"f743a92e-c395-4123-b0ed-58237f1b1c8f","Type":"ContainerStarted","Data":"be1393b6ac22c739654009e38c3f58b74735c089420c5cef9a9aa6568981cec3"} Jan 29 12:20:22 crc kubenswrapper[4753]: I0129 12:20:22.281401 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-6787488fdd-zv44d" Jan 29 12:20:22 crc kubenswrapper[4753]: I0129 12:20:22.287955 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9" (UID: "9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:20:22 crc kubenswrapper[4753]: I0129 12:20:22.309034 4753 scope.go:117] "RemoveContainer" containerID="eef34bdccd7f3976b3a8a08045556be6a3b316f4ea4f011450712918609bc80c" Jan 29 12:20:22 crc kubenswrapper[4753]: I0129 12:20:22.313342 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-6787488fdd-zv44d" podStartSLOduration=3.102645382 podStartE2EDuration="9.313317655s" podCreationTimestamp="2026-01-29 12:20:13 +0000 UTC" firstStartedPulling="2026-01-29 12:20:15.704264706 +0000 UTC m=+829.956346161" lastFinishedPulling="2026-01-29 12:20:21.914936979 +0000 UTC m=+836.167018434" observedRunningTime="2026-01-29 12:20:22.308864168 +0000 UTC m=+836.560945623" watchObservedRunningTime="2026-01-29 12:20:22.313317655 +0000 UTC m=+836.565399110" Jan 29 12:20:22 crc kubenswrapper[4753]: I0129 12:20:22.332213 4753 scope.go:117] "RemoveContainer" containerID="c06a082f753d549f33df88bd0218665d551a797178aa2b974d7e21c9f656420f" Jan 29 12:20:22 crc kubenswrapper[4753]: I0129 12:20:22.375612 4753 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 12:20:22 crc kubenswrapper[4753]: I0129 12:20:22.606402 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-qk6cr"] Jan 29 12:20:22 crc kubenswrapper[4753]: I0129 12:20:22.610349 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-qk6cr"] Jan 29 12:20:23 crc kubenswrapper[4753]: I0129 12:20:23.936747 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9" path="/var/lib/kubelet/pods/9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9/volumes" Jan 29 12:20:29 crc kubenswrapper[4753]: I0129 12:20:29.253314 4753 patch_prober.go:28] interesting pod/machine-config-daemon-7c24x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 12:20:29 crc kubenswrapper[4753]: I0129 12:20:29.253728 4753 prober.go:107] "Probe 
failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 12:20:29 crc kubenswrapper[4753]: I0129 12:20:29.253799 4753 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" Jan 29 12:20:29 crc kubenswrapper[4753]: I0129 12:20:29.254492 4753 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"4b8aed6ea15733b89649d8b36ec89a9ec9f88b46186a18e1942be3af55f96320"} pod="openshift-machine-config-operator/machine-config-daemon-7c24x" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 12:20:29 crc kubenswrapper[4753]: I0129 12:20:29.254541 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" containerName="machine-config-daemon" containerID="cri-o://4b8aed6ea15733b89649d8b36ec89a9ec9f88b46186a18e1942be3af55f96320" gracePeriod=600 Jan 29 12:20:29 crc kubenswrapper[4753]: I0129 12:20:29.575008 4753 generic.go:334] "Generic (PLEG): container finished" podID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" containerID="4b8aed6ea15733b89649d8b36ec89a9ec9f88b46186a18e1942be3af55f96320" exitCode=0 Jan 29 12:20:29 crc kubenswrapper[4753]: I0129 12:20:29.575071 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" event={"ID":"b0310995-a7c7-47c3-ae6c-05daaaba92a6","Type":"ContainerDied","Data":"4b8aed6ea15733b89649d8b36ec89a9ec9f88b46186a18e1942be3af55f96320"} Jan 29 12:20:29 crc kubenswrapper[4753]: I0129 12:20:29.575254 4753 scope.go:117] "RemoveContainer" containerID="1e0c2015443e80d2d02eec0a9677231d797e4677debbb12d709c0bd3561be1e1" Jan 29 12:20:29 crc kubenswrapper[4753]: I0129 12:20:29.625920 4753 scope.go:117] "RemoveContainer" containerID="346c36936603f4535d5ad3e6b60e451b05b4d376e2531c813f0c900c92caf5fd" Jan 29 12:20:30 crc kubenswrapper[4753]: I0129 12:20:30.584336 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" event={"ID":"b0310995-a7c7-47c3-ae6c-05daaaba92a6","Type":"ContainerStarted","Data":"a062eb50ea7e6f751f3ad76c7903eb2507edfb5f26af3ebaf7ccc8d61960baaf"} Jan 29 12:20:30 crc kubenswrapper[4753]: I0129 12:20:30.586722 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rnbz9_b372210b-6e1b-4a80-b379-7c1d570712f3/kube-multus/1.log" Jan 29 12:20:34 crc kubenswrapper[4753]: I0129 12:20:34.834719 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-6787488fdd-zv44d" Jan 29 12:20:53 crc kubenswrapper[4753]: I0129 12:20:53.623873 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-68df96bfd5-7k5hh" Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.290951 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-z5drz"] Jan 29 12:20:54 crc kubenswrapper[4753]: E0129 12:20:54.291287 4753 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9" containerName="extract-content" Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.291304 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9" containerName="extract-content" Jan 29 12:20:54 crc kubenswrapper[4753]: E0129 12:20:54.291320 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9" containerName="extract-utilities" Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.291327 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9" containerName="extract-utilities" Jan 29 12:20:54 crc kubenswrapper[4753]: E0129 12:20:54.291334 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9" containerName="registry-server" Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.291341 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9" containerName="registry-server" Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.291438 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="9ba3c1d5-b44b-4207-9aea-2a5f2e97f9b9" containerName="registry-server" Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.293678 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-z5drz" Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.296846 4753 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.297064 4753 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-8xmz6" Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.297261 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.299015 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-g8vcz"] Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.299820 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-g8vcz" Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.302968 4753 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.304172 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-g8vcz"] Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.395155 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-5fz59"] Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.396850 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-5fz59" Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.416963 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4b689b8d-ccc2-49e6-a449-c77e354f5f86-metrics-certs\") pod \"frr-k8s-z5drz\" (UID: \"4b689b8d-ccc2-49e6-a449-c77e354f5f86\") " pod="metallb-system/frr-k8s-z5drz" Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.417111 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/4b689b8d-ccc2-49e6-a449-c77e354f5f86-frr-startup\") pod \"frr-k8s-z5drz\" (UID: \"4b689b8d-ccc2-49e6-a449-c77e354f5f86\") " pod="metallb-system/frr-k8s-z5drz" Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.417187 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lsqmq\" (UniqueName: \"kubernetes.io/projected/f59f410e-2d13-4bbc-aca6-c50536d77905-kube-api-access-lsqmq\") pod \"speaker-5fz59\" (UID: \"f59f410e-2d13-4bbc-aca6-c50536d77905\") " pod="metallb-system/speaker-5fz59" Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.417259 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e87775ff-faac-474c-814f-6873998fc276-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-g8vcz\" (UID: \"e87775ff-faac-474c-814f-6873998fc276\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-g8vcz" Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.417288 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fhzjj\" (UniqueName: \"kubernetes.io/projected/4b689b8d-ccc2-49e6-a449-c77e354f5f86-kube-api-access-fhzjj\") pod \"frr-k8s-z5drz\" (UID: \"4b689b8d-ccc2-49e6-a449-c77e354f5f86\") " pod="metallb-system/frr-k8s-z5drz" Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.417332 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/4b689b8d-ccc2-49e6-a449-c77e354f5f86-frr-conf\") pod \"frr-k8s-z5drz\" (UID: \"4b689b8d-ccc2-49e6-a449-c77e354f5f86\") " pod="metallb-system/frr-k8s-z5drz" Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.417418 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/4b689b8d-ccc2-49e6-a449-c77e354f5f86-metrics\") pod \"frr-k8s-z5drz\" (UID: \"4b689b8d-ccc2-49e6-a449-c77e354f5f86\") " pod="metallb-system/frr-k8s-z5drz" Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.417469 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/4b689b8d-ccc2-49e6-a449-c77e354f5f86-reloader\") pod \"frr-k8s-z5drz\" (UID: \"4b689b8d-ccc2-49e6-a449-c77e354f5f86\") " pod="metallb-system/frr-k8s-z5drz" Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.417550 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f59f410e-2d13-4bbc-aca6-c50536d77905-metrics-certs\") pod \"speaker-5fz59\" (UID: \"f59f410e-2d13-4bbc-aca6-c50536d77905\") " pod="metallb-system/speaker-5fz59" Jan 29 12:20:54 crc kubenswrapper[4753]: 
I0129 12:20:54.417590 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/f59f410e-2d13-4bbc-aca6-c50536d77905-memberlist\") pod \"speaker-5fz59\" (UID: \"f59f410e-2d13-4bbc-aca6-c50536d77905\") " pod="metallb-system/speaker-5fz59" Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.417639 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9jzsq\" (UniqueName: \"kubernetes.io/projected/e87775ff-faac-474c-814f-6873998fc276-kube-api-access-9jzsq\") pod \"frr-k8s-webhook-server-7df86c4f6c-g8vcz\" (UID: \"e87775ff-faac-474c-814f-6873998fc276\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-g8vcz" Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.417685 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/4b689b8d-ccc2-49e6-a449-c77e354f5f86-frr-sockets\") pod \"frr-k8s-z5drz\" (UID: \"4b689b8d-ccc2-49e6-a449-c77e354f5f86\") " pod="metallb-system/frr-k8s-z5drz" Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.417791 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/f59f410e-2d13-4bbc-aca6-c50536d77905-metallb-excludel2\") pod \"speaker-5fz59\" (UID: \"f59f410e-2d13-4bbc-aca6-c50536d77905\") " pod="metallb-system/speaker-5fz59" Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.516936 4753 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-kmpjd" Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.517201 4753 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.517395 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.518164 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f59f410e-2d13-4bbc-aca6-c50536d77905-metrics-certs\") pod \"speaker-5fz59\" (UID: \"f59f410e-2d13-4bbc-aca6-c50536d77905\") " pod="metallb-system/speaker-5fz59" Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.518197 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/f59f410e-2d13-4bbc-aca6-c50536d77905-memberlist\") pod \"speaker-5fz59\" (UID: \"f59f410e-2d13-4bbc-aca6-c50536d77905\") " pod="metallb-system/speaker-5fz59" Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.518243 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9jzsq\" (UniqueName: \"kubernetes.io/projected/e87775ff-faac-474c-814f-6873998fc276-kube-api-access-9jzsq\") pod \"frr-k8s-webhook-server-7df86c4f6c-g8vcz\" (UID: \"e87775ff-faac-474c-814f-6873998fc276\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-g8vcz" Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.518266 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/4b689b8d-ccc2-49e6-a449-c77e354f5f86-frr-sockets\") pod \"frr-k8s-z5drz\" (UID: \"4b689b8d-ccc2-49e6-a449-c77e354f5f86\") " 
pod="metallb-system/frr-k8s-z5drz" Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.518310 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/f59f410e-2d13-4bbc-aca6-c50536d77905-metallb-excludel2\") pod \"speaker-5fz59\" (UID: \"f59f410e-2d13-4bbc-aca6-c50536d77905\") " pod="metallb-system/speaker-5fz59" Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.518352 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4b689b8d-ccc2-49e6-a449-c77e354f5f86-metrics-certs\") pod \"frr-k8s-z5drz\" (UID: \"4b689b8d-ccc2-49e6-a449-c77e354f5f86\") " pod="metallb-system/frr-k8s-z5drz" Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.518389 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/4b689b8d-ccc2-49e6-a449-c77e354f5f86-frr-startup\") pod \"frr-k8s-z5drz\" (UID: \"4b689b8d-ccc2-49e6-a449-c77e354f5f86\") " pod="metallb-system/frr-k8s-z5drz" Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.518422 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lsqmq\" (UniqueName: \"kubernetes.io/projected/f59f410e-2d13-4bbc-aca6-c50536d77905-kube-api-access-lsqmq\") pod \"speaker-5fz59\" (UID: \"f59f410e-2d13-4bbc-aca6-c50536d77905\") " pod="metallb-system/speaker-5fz59" Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.518448 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e87775ff-faac-474c-814f-6873998fc276-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-g8vcz\" (UID: \"e87775ff-faac-474c-814f-6873998fc276\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-g8vcz" Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.518475 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fhzjj\" (UniqueName: \"kubernetes.io/projected/4b689b8d-ccc2-49e6-a449-c77e354f5f86-kube-api-access-fhzjj\") pod \"frr-k8s-z5drz\" (UID: \"4b689b8d-ccc2-49e6-a449-c77e354f5f86\") " pod="metallb-system/frr-k8s-z5drz" Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.518513 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/4b689b8d-ccc2-49e6-a449-c77e354f5f86-frr-conf\") pod \"frr-k8s-z5drz\" (UID: \"4b689b8d-ccc2-49e6-a449-c77e354f5f86\") " pod="metallb-system/frr-k8s-z5drz" Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.518532 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/4b689b8d-ccc2-49e6-a449-c77e354f5f86-metrics\") pod \"frr-k8s-z5drz\" (UID: \"4b689b8d-ccc2-49e6-a449-c77e354f5f86\") " pod="metallb-system/frr-k8s-z5drz" Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.518552 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/4b689b8d-ccc2-49e6-a449-c77e354f5f86-reloader\") pod \"frr-k8s-z5drz\" (UID: \"4b689b8d-ccc2-49e6-a449-c77e354f5f86\") " pod="metallb-system/frr-k8s-z5drz" Jan 29 12:20:54 crc kubenswrapper[4753]: E0129 12:20:54.518537 4753 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Jan 29 12:20:54 crc kubenswrapper[4753]: E0129 
12:20:54.518997 4753 secret.go:188] Couldn't get secret metallb-system/frr-k8s-certs-secret: secret "frr-k8s-certs-secret" not found
Jan 29 12:20:54 crc kubenswrapper[4753]: E0129 12:20:54.519127 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4b689b8d-ccc2-49e6-a449-c77e354f5f86-metrics-certs podName:4b689b8d-ccc2-49e6-a449-c77e354f5f86 nodeName:}" failed. No retries permitted until 2026-01-29 12:20:55.019073585 +0000 UTC m=+869.271155040 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/4b689b8d-ccc2-49e6-a449-c77e354f5f86-metrics-certs") pod "frr-k8s-z5drz" (UID: "4b689b8d-ccc2-49e6-a449-c77e354f5f86") : secret "frr-k8s-certs-secret" not found
Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.520093 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/f59f410e-2d13-4bbc-aca6-c50536d77905-metallb-excludel2\") pod \"speaker-5fz59\" (UID: \"f59f410e-2d13-4bbc-aca6-c50536d77905\") " pod="metallb-system/speaker-5fz59"
Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.520557 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/4b689b8d-ccc2-49e6-a449-c77e354f5f86-frr-sockets\") pod \"frr-k8s-z5drz\" (UID: \"4b689b8d-ccc2-49e6-a449-c77e354f5f86\") " pod="metallb-system/frr-k8s-z5drz"
Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.520625 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/4b689b8d-ccc2-49e6-a449-c77e354f5f86-frr-startup\") pod \"frr-k8s-z5drz\" (UID: \"4b689b8d-ccc2-49e6-a449-c77e354f5f86\") " pod="metallb-system/frr-k8s-z5drz"
Jan 29 12:20:54 crc kubenswrapper[4753]: E0129 12:20:54.520662 4753 secret.go:188] Couldn't get secret metallb-system/frr-k8s-webhook-server-cert: secret "frr-k8s-webhook-server-cert" not found
Jan 29 12:20:54 crc kubenswrapper[4753]: E0129 12:20:54.520718 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e87775ff-faac-474c-814f-6873998fc276-cert podName:e87775ff-faac-474c-814f-6873998fc276 nodeName:}" failed. No retries permitted until 2026-01-29 12:20:55.020702428 +0000 UTC m=+869.272783943 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/e87775ff-faac-474c-814f-6873998fc276-cert") pod "frr-k8s-webhook-server-7df86c4f6c-g8vcz" (UID: "e87775ff-faac-474c-814f-6873998fc276") : secret "frr-k8s-webhook-server-cert" not found Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.520657 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/4b689b8d-ccc2-49e6-a449-c77e354f5f86-frr-conf\") pod \"frr-k8s-z5drz\" (UID: \"4b689b8d-ccc2-49e6-a449-c77e354f5f86\") " pod="metallb-system/frr-k8s-z5drz" Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.520904 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/4b689b8d-ccc2-49e6-a449-c77e354f5f86-metrics\") pod \"frr-k8s-z5drz\" (UID: \"4b689b8d-ccc2-49e6-a449-c77e354f5f86\") " pod="metallb-system/frr-k8s-z5drz" Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.521323 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/4b689b8d-ccc2-49e6-a449-c77e354f5f86-reloader\") pod \"frr-k8s-z5drz\" (UID: \"4b689b8d-ccc2-49e6-a449-c77e354f5f86\") " pod="metallb-system/frr-k8s-z5drz" Jan 29 12:20:54 crc kubenswrapper[4753]: E0129 12:20:54.523125 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f59f410e-2d13-4bbc-aca6-c50536d77905-memberlist podName:f59f410e-2d13-4bbc-aca6-c50536d77905 nodeName:}" failed. No retries permitted until 2026-01-29 12:20:55.023085443 +0000 UTC m=+869.275166898 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/f59f410e-2d13-4bbc-aca6-c50536d77905-memberlist") pod "speaker-5fz59" (UID: "f59f410e-2d13-4bbc-aca6-c50536d77905") : secret "metallb-memberlist" not found Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.529442 4753 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Jan 29 12:20:54 crc kubenswrapper[4753]: E0129 12:20:54.531297 4753 secret.go:188] Couldn't get secret metallb-system/speaker-certs-secret: secret "speaker-certs-secret" not found Jan 29 12:20:54 crc kubenswrapper[4753]: E0129 12:20:54.531376 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f59f410e-2d13-4bbc-aca6-c50536d77905-metrics-certs podName:f59f410e-2d13-4bbc-aca6-c50536d77905 nodeName:}" failed. No retries permitted until 2026-01-29 12:20:55.031353685 +0000 UTC m=+869.283435140 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/f59f410e-2d13-4bbc-aca6-c50536d77905-metrics-certs") pod "speaker-5fz59" (UID: "f59f410e-2d13-4bbc-aca6-c50536d77905") : secret "speaker-certs-secret" not found Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.534089 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-6968d8fdc4-n9w5v"] Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.538688 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-6968d8fdc4-n9w5v" Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.546294 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lsqmq\" (UniqueName: \"kubernetes.io/projected/f59f410e-2d13-4bbc-aca6-c50536d77905-kube-api-access-lsqmq\") pod \"speaker-5fz59\" (UID: \"f59f410e-2d13-4bbc-aca6-c50536d77905\") " pod="metallb-system/speaker-5fz59" Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.552876 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fhzjj\" (UniqueName: \"kubernetes.io/projected/4b689b8d-ccc2-49e6-a449-c77e354f5f86-kube-api-access-fhzjj\") pod \"frr-k8s-z5drz\" (UID: \"4b689b8d-ccc2-49e6-a449-c77e354f5f86\") " pod="metallb-system/frr-k8s-z5drz" Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.559501 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9jzsq\" (UniqueName: \"kubernetes.io/projected/e87775ff-faac-474c-814f-6873998fc276-kube-api-access-9jzsq\") pod \"frr-k8s-webhook-server-7df86c4f6c-g8vcz\" (UID: \"e87775ff-faac-474c-814f-6873998fc276\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-g8vcz" Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.570704 4753 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.576334 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6968d8fdc4-n9w5v"] Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.619201 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rgv9v\" (UniqueName: \"kubernetes.io/projected/3135e3f4-69fe-446f-9778-3e77d2b07dbf-kube-api-access-rgv9v\") pod \"controller-6968d8fdc4-n9w5v\" (UID: \"3135e3f4-69fe-446f-9778-3e77d2b07dbf\") " pod="metallb-system/controller-6968d8fdc4-n9w5v" Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.619591 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/3135e3f4-69fe-446f-9778-3e77d2b07dbf-cert\") pod \"controller-6968d8fdc4-n9w5v\" (UID: \"3135e3f4-69fe-446f-9778-3e77d2b07dbf\") " pod="metallb-system/controller-6968d8fdc4-n9w5v" Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.619711 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3135e3f4-69fe-446f-9778-3e77d2b07dbf-metrics-certs\") pod \"controller-6968d8fdc4-n9w5v\" (UID: \"3135e3f4-69fe-446f-9778-3e77d2b07dbf\") " pod="metallb-system/controller-6968d8fdc4-n9w5v" Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.720670 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rgv9v\" (UniqueName: \"kubernetes.io/projected/3135e3f4-69fe-446f-9778-3e77d2b07dbf-kube-api-access-rgv9v\") pod \"controller-6968d8fdc4-n9w5v\" (UID: \"3135e3f4-69fe-446f-9778-3e77d2b07dbf\") " pod="metallb-system/controller-6968d8fdc4-n9w5v" Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.720761 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/3135e3f4-69fe-446f-9778-3e77d2b07dbf-cert\") pod \"controller-6968d8fdc4-n9w5v\" (UID: \"3135e3f4-69fe-446f-9778-3e77d2b07dbf\") " 
pod="metallb-system/controller-6968d8fdc4-n9w5v" Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.720799 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3135e3f4-69fe-446f-9778-3e77d2b07dbf-metrics-certs\") pod \"controller-6968d8fdc4-n9w5v\" (UID: \"3135e3f4-69fe-446f-9778-3e77d2b07dbf\") " pod="metallb-system/controller-6968d8fdc4-n9w5v" Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.724497 4753 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.724974 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3135e3f4-69fe-446f-9778-3e77d2b07dbf-metrics-certs\") pod \"controller-6968d8fdc4-n9w5v\" (UID: \"3135e3f4-69fe-446f-9778-3e77d2b07dbf\") " pod="metallb-system/controller-6968d8fdc4-n9w5v" Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.736051 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/3135e3f4-69fe-446f-9778-3e77d2b07dbf-cert\") pod \"controller-6968d8fdc4-n9w5v\" (UID: \"3135e3f4-69fe-446f-9778-3e77d2b07dbf\") " pod="metallb-system/controller-6968d8fdc4-n9w5v" Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.740904 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rgv9v\" (UniqueName: \"kubernetes.io/projected/3135e3f4-69fe-446f-9778-3e77d2b07dbf-kube-api-access-rgv9v\") pod \"controller-6968d8fdc4-n9w5v\" (UID: \"3135e3f4-69fe-446f-9778-3e77d2b07dbf\") " pod="metallb-system/controller-6968d8fdc4-n9w5v" Jan 29 12:20:54 crc kubenswrapper[4753]: I0129 12:20:54.902649 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-6968d8fdc4-n9w5v" Jan 29 12:20:55 crc kubenswrapper[4753]: I0129 12:20:55.024553 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/f59f410e-2d13-4bbc-aca6-c50536d77905-memberlist\") pod \"speaker-5fz59\" (UID: \"f59f410e-2d13-4bbc-aca6-c50536d77905\") " pod="metallb-system/speaker-5fz59" Jan 29 12:20:55 crc kubenswrapper[4753]: I0129 12:20:55.024670 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4b689b8d-ccc2-49e6-a449-c77e354f5f86-metrics-certs\") pod \"frr-k8s-z5drz\" (UID: \"4b689b8d-ccc2-49e6-a449-c77e354f5f86\") " pod="metallb-system/frr-k8s-z5drz" Jan 29 12:20:55 crc kubenswrapper[4753]: I0129 12:20:55.024718 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e87775ff-faac-474c-814f-6873998fc276-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-g8vcz\" (UID: \"e87775ff-faac-474c-814f-6873998fc276\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-g8vcz" Jan 29 12:20:55 crc kubenswrapper[4753]: E0129 12:20:55.024737 4753 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Jan 29 12:20:55 crc kubenswrapper[4753]: E0129 12:20:55.024807 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f59f410e-2d13-4bbc-aca6-c50536d77905-memberlist podName:f59f410e-2d13-4bbc-aca6-c50536d77905 nodeName:}" failed. 
No retries permitted until 2026-01-29 12:20:56.024786351 +0000 UTC m=+870.276867806 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/f59f410e-2d13-4bbc-aca6-c50536d77905-memberlist") pod "speaker-5fz59" (UID: "f59f410e-2d13-4bbc-aca6-c50536d77905") : secret "metallb-memberlist" not found Jan 29 12:20:55 crc kubenswrapper[4753]: I0129 12:20:55.040145 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4b689b8d-ccc2-49e6-a449-c77e354f5f86-metrics-certs\") pod \"frr-k8s-z5drz\" (UID: \"4b689b8d-ccc2-49e6-a449-c77e354f5f86\") " pod="metallb-system/frr-k8s-z5drz" Jan 29 12:20:55 crc kubenswrapper[4753]: I0129 12:20:55.040212 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e87775ff-faac-474c-814f-6873998fc276-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-g8vcz\" (UID: \"e87775ff-faac-474c-814f-6873998fc276\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-g8vcz" Jan 29 12:20:55 crc kubenswrapper[4753]: I0129 12:20:55.112148 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6968d8fdc4-n9w5v"] Jan 29 12:20:55 crc kubenswrapper[4753]: I0129 12:20:55.126053 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f59f410e-2d13-4bbc-aca6-c50536d77905-metrics-certs\") pod \"speaker-5fz59\" (UID: \"f59f410e-2d13-4bbc-aca6-c50536d77905\") " pod="metallb-system/speaker-5fz59" Jan 29 12:20:55 crc kubenswrapper[4753]: I0129 12:20:55.129107 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f59f410e-2d13-4bbc-aca6-c50536d77905-metrics-certs\") pod \"speaker-5fz59\" (UID: \"f59f410e-2d13-4bbc-aca6-c50536d77905\") " pod="metallb-system/speaker-5fz59" Jan 29 12:20:55 crc kubenswrapper[4753]: I0129 12:20:55.223571 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-z5drz" Jan 29 12:20:55 crc kubenswrapper[4753]: I0129 12:20:55.240370 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-g8vcz" Jan 29 12:20:55 crc kubenswrapper[4753]: I0129 12:20:55.671892 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-g8vcz"] Jan 29 12:20:55 crc kubenswrapper[4753]: I0129 12:20:55.805455 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-g8vcz" event={"ID":"e87775ff-faac-474c-814f-6873998fc276","Type":"ContainerStarted","Data":"48d615566d158a12470778592c8f204dc95c353cd0b46044be50423356826549"} Jan 29 12:20:55 crc kubenswrapper[4753]: I0129 12:20:55.806262 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-z5drz" event={"ID":"4b689b8d-ccc2-49e6-a449-c77e354f5f86","Type":"ContainerStarted","Data":"4c1cef82acc937209214a8cb5118a1d7a6bdcfe0afee4915439037a66be335d7"} Jan 29 12:20:55 crc kubenswrapper[4753]: I0129 12:20:55.807332 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-n9w5v" event={"ID":"3135e3f4-69fe-446f-9778-3e77d2b07dbf","Type":"ContainerStarted","Data":"495e990c90f58ea553cdea892d0dc60e48fbce143aaddf05c9827a483be3530f"} Jan 29 12:20:55 crc kubenswrapper[4753]: I0129 12:20:55.807360 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-n9w5v" event={"ID":"3135e3f4-69fe-446f-9778-3e77d2b07dbf","Type":"ContainerStarted","Data":"36f69b7f2964bf8b2d6a31db20313301a8de46ab50f45adea723cca42c4a38c9"} Jan 29 12:20:56 crc kubenswrapper[4753]: I0129 12:20:56.053795 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/f59f410e-2d13-4bbc-aca6-c50536d77905-memberlist\") pod \"speaker-5fz59\" (UID: \"f59f410e-2d13-4bbc-aca6-c50536d77905\") " pod="metallb-system/speaker-5fz59" Jan 29 12:20:56 crc kubenswrapper[4753]: I0129 12:20:56.061979 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/f59f410e-2d13-4bbc-aca6-c50536d77905-memberlist\") pod \"speaker-5fz59\" (UID: \"f59f410e-2d13-4bbc-aca6-c50536d77905\") " pod="metallb-system/speaker-5fz59" Jan 29 12:20:56 crc kubenswrapper[4753]: I0129 12:20:56.338406 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-5fz59" Jan 29 12:20:56 crc kubenswrapper[4753]: W0129 12:20:56.386488 4753 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf59f410e_2d13_4bbc_aca6_c50536d77905.slice/crio-06940b01cdfafe0c1ef86a79eef70f278d6bb0351039467e1cb98ce63828e660 WatchSource:0}: Error finding container 06940b01cdfafe0c1ef86a79eef70f278d6bb0351039467e1cb98ce63828e660: Status 404 returned error can't find the container with id 06940b01cdfafe0c1ef86a79eef70f278d6bb0351039467e1cb98ce63828e660 Jan 29 12:20:56 crc kubenswrapper[4753]: I0129 12:20:56.869258 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-5fz59" event={"ID":"f59f410e-2d13-4bbc-aca6-c50536d77905","Type":"ContainerStarted","Data":"e96911d973ec38cfc8658baf816c425a525594473d990257dbe52f7c9b621040"} Jan 29 12:20:56 crc kubenswrapper[4753]: I0129 12:20:56.869326 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-5fz59" event={"ID":"f59f410e-2d13-4bbc-aca6-c50536d77905","Type":"ContainerStarted","Data":"06940b01cdfafe0c1ef86a79eef70f278d6bb0351039467e1cb98ce63828e660"} Jan 29 12:20:59 crc kubenswrapper[4753]: I0129 12:20:59.916076 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-5fz59" event={"ID":"f59f410e-2d13-4bbc-aca6-c50536d77905","Type":"ContainerStarted","Data":"12571b476c0dd29fdf3e659c5955a669c2736bb4f7665a47b1953e382f081913"} Jan 29 12:20:59 crc kubenswrapper[4753]: I0129 12:20:59.916724 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-5fz59" Jan 29 12:20:59 crc kubenswrapper[4753]: I0129 12:20:59.920591 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-n9w5v" event={"ID":"3135e3f4-69fe-446f-9778-3e77d2b07dbf","Type":"ContainerStarted","Data":"656f20f32a57051daef18b5230b2856757698045915c9bc1d1b88e764896b959"} Jan 29 12:20:59 crc kubenswrapper[4753]: I0129 12:20:59.921173 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-6968d8fdc4-n9w5v" Jan 29 12:20:59 crc kubenswrapper[4753]: I0129 12:20:59.938356 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-5fz59" podStartSLOduration=3.154425142 podStartE2EDuration="5.93831982s" podCreationTimestamp="2026-01-29 12:20:54 +0000 UTC" firstStartedPulling="2026-01-29 12:20:56.725289213 +0000 UTC m=+870.977370668" lastFinishedPulling="2026-01-29 12:20:59.509183891 +0000 UTC m=+873.761265346" observedRunningTime="2026-01-29 12:20:59.937831937 +0000 UTC m=+874.189913392" watchObservedRunningTime="2026-01-29 12:20:59.93831982 +0000 UTC m=+874.190401315" Jan 29 12:20:59 crc kubenswrapper[4753]: I0129 12:20:59.992058 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-6968d8fdc4-n9w5v" podStartSLOduration=1.737801505 podStartE2EDuration="5.992008662s" podCreationTimestamp="2026-01-29 12:20:54 +0000 UTC" firstStartedPulling="2026-01-29 12:20:55.253248398 +0000 UTC m=+869.505329853" lastFinishedPulling="2026-01-29 12:20:59.507455555 +0000 UTC m=+873.759537010" observedRunningTime="2026-01-29 12:20:59.986126294 +0000 UTC m=+874.238207769" watchObservedRunningTime="2026-01-29 12:20:59.992008662 +0000 UTC m=+874.244090117" Jan 29 12:21:04 crc kubenswrapper[4753]: E0129 12:21:04.427099 4753 cadvisor_stats_provider.go:516] "Partial failure issuing 
cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4b689b8d_ccc2_49e6_a449_c77e354f5f86.slice/crio-f5f0591b75c11c8b10e9bd6f7dae6e7f52fa53149e9fc7da6e94d55e85a5d960.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4b689b8d_ccc2_49e6_a449_c77e354f5f86.slice/crio-conmon-f5f0591b75c11c8b10e9bd6f7dae6e7f52fa53149e9fc7da6e94d55e85a5d960.scope\": RecentStats: unable to find data in memory cache]" Jan 29 12:21:04 crc kubenswrapper[4753]: I0129 12:21:04.956551 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-g8vcz" event={"ID":"e87775ff-faac-474c-814f-6873998fc276","Type":"ContainerStarted","Data":"9f6ac80ad86f9549327d35705f9c6fa1a90ca9c5d57a33dd980320f938afb0cd"} Jan 29 12:21:04 crc kubenswrapper[4753]: I0129 12:21:04.956890 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-g8vcz" Jan 29 12:21:04 crc kubenswrapper[4753]: I0129 12:21:04.959410 4753 generic.go:334] "Generic (PLEG): container finished" podID="4b689b8d-ccc2-49e6-a449-c77e354f5f86" containerID="f5f0591b75c11c8b10e9bd6f7dae6e7f52fa53149e9fc7da6e94d55e85a5d960" exitCode=0 Jan 29 12:21:04 crc kubenswrapper[4753]: I0129 12:21:04.959472 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-z5drz" event={"ID":"4b689b8d-ccc2-49e6-a449-c77e354f5f86","Type":"ContainerDied","Data":"f5f0591b75c11c8b10e9bd6f7dae6e7f52fa53149e9fc7da6e94d55e85a5d960"} Jan 29 12:21:04 crc kubenswrapper[4753]: I0129 12:21:04.980358 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-g8vcz" podStartSLOduration=2.527680925 podStartE2EDuration="10.980336701s" podCreationTimestamp="2026-01-29 12:20:54 +0000 UTC" firstStartedPulling="2026-01-29 12:20:55.697934834 +0000 UTC m=+869.950016289" lastFinishedPulling="2026-01-29 12:21:04.15059061 +0000 UTC m=+878.402672065" observedRunningTime="2026-01-29 12:21:04.975618534 +0000 UTC m=+879.227699999" watchObservedRunningTime="2026-01-29 12:21:04.980336701 +0000 UTC m=+879.232418156" Jan 29 12:21:05 crc kubenswrapper[4753]: I0129 12:21:05.968052 4753 generic.go:334] "Generic (PLEG): container finished" podID="4b689b8d-ccc2-49e6-a449-c77e354f5f86" containerID="0d7f6caf122548fe97adae31b26178409ed2b49aa78040dd9bd44a682982551c" exitCode=0 Jan 29 12:21:05 crc kubenswrapper[4753]: I0129 12:21:05.968134 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-z5drz" event={"ID":"4b689b8d-ccc2-49e6-a449-c77e354f5f86","Type":"ContainerDied","Data":"0d7f6caf122548fe97adae31b26178409ed2b49aa78040dd9bd44a682982551c"} Jan 29 12:21:06 crc kubenswrapper[4753]: I0129 12:21:06.342991 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-5fz59" Jan 29 12:21:06 crc kubenswrapper[4753]: I0129 12:21:06.981970 4753 generic.go:334] "Generic (PLEG): container finished" podID="4b689b8d-ccc2-49e6-a449-c77e354f5f86" containerID="3ebe958a7cee058d513e3971b9a1ea2da29845cf2107ac7667ac908ef4d2e6d1" exitCode=0 Jan 29 12:21:06 crc kubenswrapper[4753]: I0129 12:21:06.982028 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-z5drz" event={"ID":"4b689b8d-ccc2-49e6-a449-c77e354f5f86","Type":"ContainerDied","Data":"3ebe958a7cee058d513e3971b9a1ea2da29845cf2107ac7667ac908ef4d2e6d1"} Jan 29 
12:21:07 crc kubenswrapper[4753]: I0129 12:21:07.996172 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-z5drz" event={"ID":"4b689b8d-ccc2-49e6-a449-c77e354f5f86","Type":"ContainerStarted","Data":"2869f0181655c7f5f99868e1c56b26936c614b99b5c2e0899829a16c86fddb76"} Jan 29 12:21:07 crc kubenswrapper[4753]: I0129 12:21:07.996575 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-z5drz" event={"ID":"4b689b8d-ccc2-49e6-a449-c77e354f5f86","Type":"ContainerStarted","Data":"ffeb92b19e1ae9156c0fa6184ca912704157e8c8b5036be205359ddaca2baae4"} Jan 29 12:21:07 crc kubenswrapper[4753]: I0129 12:21:07.996596 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-z5drz" event={"ID":"4b689b8d-ccc2-49e6-a449-c77e354f5f86","Type":"ContainerStarted","Data":"6809f5b464643ee8114bc9507cbe9d057f83c8a63dcd8f0eae2411357338cd08"} Jan 29 12:21:07 crc kubenswrapper[4753]: I0129 12:21:07.996609 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-z5drz" event={"ID":"4b689b8d-ccc2-49e6-a449-c77e354f5f86","Type":"ContainerStarted","Data":"6d98e17a87f31328915b8733ef1524975380a3979cbf3499e0042efbc055f2c4"} Jan 29 12:21:07 crc kubenswrapper[4753]: I0129 12:21:07.996620 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-z5drz" event={"ID":"4b689b8d-ccc2-49e6-a449-c77e354f5f86","Type":"ContainerStarted","Data":"e5dcea984b4ef24ec6e83cc4ca56d5198ff8fa40e375401cf178f205265050d4"} Jan 29 12:21:09 crc kubenswrapper[4753]: I0129 12:21:09.008716 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-z5drz" event={"ID":"4b689b8d-ccc2-49e6-a449-c77e354f5f86","Type":"ContainerStarted","Data":"8d14b9ee7dc149ddb06083d3de8020daa815d307c60456bf86c4fa4fe2f648ac"} Jan 29 12:21:09 crc kubenswrapper[4753]: I0129 12:21:09.009498 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-z5drz" Jan 29 12:21:09 crc kubenswrapper[4753]: I0129 12:21:09.037808 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-z5drz" podStartSLOduration=6.412903119 podStartE2EDuration="15.037790123s" podCreationTimestamp="2026-01-29 12:20:54 +0000 UTC" firstStartedPulling="2026-01-29 12:20:55.543963307 +0000 UTC m=+869.796044762" lastFinishedPulling="2026-01-29 12:21:04.168850321 +0000 UTC m=+878.420931766" observedRunningTime="2026-01-29 12:21:09.034462043 +0000 UTC m=+883.286543508" watchObservedRunningTime="2026-01-29 12:21:09.037790123 +0000 UTC m=+883.289871578" Jan 29 12:21:10 crc kubenswrapper[4753]: I0129 12:21:10.224830 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-z5drz" Jan 29 12:21:10 crc kubenswrapper[4753]: I0129 12:21:10.267665 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-z5drz" Jan 29 12:21:12 crc kubenswrapper[4753]: I0129 12:21:12.849084 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-index-bd94q"] Jan 29 12:21:12 crc kubenswrapper[4753]: I0129 12:21:12.850578 4753 util.go:30] "No sandbox for pod can be found. 
Jan 29 12:21:12 crc kubenswrapper[4753]: I0129 12:21:12.852530 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt"
Jan 29 12:21:12 crc kubenswrapper[4753]: I0129 12:21:12.852530 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt"
Jan 29 12:21:12 crc kubenswrapper[4753]: I0129 12:21:12.853530 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-index-dockercfg-hlq6m"
Jan 29 12:21:12 crc kubenswrapper[4753]: I0129 12:21:12.873262 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-index-bd94q"]
Jan 29 12:21:12 crc kubenswrapper[4753]: I0129 12:21:12.896594 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fq69l\" (UniqueName: \"kubernetes.io/projected/64ccf1b0-5038-4d95-9934-530559f35f1d-kube-api-access-fq69l\") pod \"mariadb-operator-index-bd94q\" (UID: \"64ccf1b0-5038-4d95-9934-530559f35f1d\") " pod="openstack-operators/mariadb-operator-index-bd94q"
Jan 29 12:21:12 crc kubenswrapper[4753]: I0129 12:21:12.997472 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fq69l\" (UniqueName: \"kubernetes.io/projected/64ccf1b0-5038-4d95-9934-530559f35f1d-kube-api-access-fq69l\") pod \"mariadb-operator-index-bd94q\" (UID: \"64ccf1b0-5038-4d95-9934-530559f35f1d\") " pod="openstack-operators/mariadb-operator-index-bd94q"
Jan 29 12:21:13 crc kubenswrapper[4753]: I0129 12:21:13.016213 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fq69l\" (UniqueName: \"kubernetes.io/projected/64ccf1b0-5038-4d95-9934-530559f35f1d-kube-api-access-fq69l\") pod \"mariadb-operator-index-bd94q\" (UID: \"64ccf1b0-5038-4d95-9934-530559f35f1d\") " pod="openstack-operators/mariadb-operator-index-bd94q"
Jan 29 12:21:13 crc kubenswrapper[4753]: I0129 12:21:13.184717 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-index-bd94q"
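The three volume entries above trace the kubelet volume manager's reconcile steps for one projected volume: verify the controller attached it, start the mount, then SetUp succeeds. A simplified sketch of that desired-state/actual-state loop (conceptual only, not the actual reconciler_common.go code):

package main

import "fmt"

type volume struct{ name, pod string }

// reconcile drives actual state (mounted) toward desired state, emitting the
// same three phases visible in the log above.
func reconcile(desired []volume, mounted map[string]bool) {
	for _, v := range desired {
		if mounted[v.name] {
			continue // already in actual state, nothing to do
		}
		fmt.Printf("VerifyControllerAttachedVolume started for %q\n", v.name)
		fmt.Printf("MountVolume started for %q (pod %s)\n", v.name, v.pod)
		// ...plugin-specific SetUp would run here...
		mounted[v.name] = true
		fmt.Printf("MountVolume.SetUp succeeded for %q\n", v.name)
	}
}

func main() {
	reconcile(
		[]volume{{"kube-api-access-fq69l", "mariadb-operator-index-bd94q"}},
		map[string]bool{},
	)
}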
Jan 29 12:21:13 crc kubenswrapper[4753]: I0129 12:21:13.406423 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-index-bd94q"]
Jan 29 12:21:14 crc kubenswrapper[4753]: I0129 12:21:14.068463 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-index-bd94q" event={"ID":"64ccf1b0-5038-4d95-9934-530559f35f1d","Type":"ContainerStarted","Data":"9eaaeea778e668aaf3c6d7b6ce64278014c259e8ff027d39dde45c7600668db0"}
Jan 29 12:21:14 crc kubenswrapper[4753]: I0129 12:21:14.909179 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-6968d8fdc4-n9w5v"
Jan 29 12:21:15 crc kubenswrapper[4753]: I0129 12:21:15.076884 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-index-bd94q" event={"ID":"64ccf1b0-5038-4d95-9934-530559f35f1d","Type":"ContainerStarted","Data":"abc190c35932947867dd7f75684232544fc9350346ea8dce845e4248d564361e"}
Jan 29 12:21:15 crc kubenswrapper[4753]: I0129 12:21:15.095124 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-index-bd94q" podStartSLOduration=2.285038306 podStartE2EDuration="3.095105719s" podCreationTimestamp="2026-01-29 12:21:12 +0000 UTC" firstStartedPulling="2026-01-29 12:21:13.432451132 +0000 UTC m=+887.684532587" lastFinishedPulling="2026-01-29 12:21:14.242518545 +0000 UTC m=+888.494600000" observedRunningTime="2026-01-29 12:21:15.092057597 +0000 UTC m=+889.344139052" watchObservedRunningTime="2026-01-29 12:21:15.095105719 +0000 UTC m=+889.347187174"
Jan 29 12:21:15 crc kubenswrapper[4753]: I0129 12:21:15.246525 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-g8vcz"
Jan 29 12:21:16 crc kubenswrapper[4753]: I0129 12:21:16.025999 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/mariadb-operator-index-bd94q"]
Jan 29 12:21:16 crc kubenswrapper[4753]: I0129 12:21:16.637081 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-index-kdzwc"]
Jan 29 12:21:16 crc kubenswrapper[4753]: I0129 12:21:16.638148 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-index-kdzwc"
Jan 29 12:21:16 crc kubenswrapper[4753]: I0129 12:21:16.655019 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-index-kdzwc"]
Jan 29 12:21:16 crc kubenswrapper[4753]: I0129 12:21:16.766392 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qwqcj\" (UniqueName: \"kubernetes.io/projected/6a2cd3a2-57bb-4d30-8d10-666c40a8908d-kube-api-access-qwqcj\") pod \"mariadb-operator-index-kdzwc\" (UID: \"6a2cd3a2-57bb-4d30-8d10-666c40a8908d\") " pod="openstack-operators/mariadb-operator-index-kdzwc"
Jan 29 12:21:16 crc kubenswrapper[4753]: I0129 12:21:16.868123 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qwqcj\" (UniqueName: \"kubernetes.io/projected/6a2cd3a2-57bb-4d30-8d10-666c40a8908d-kube-api-access-qwqcj\") pod \"mariadb-operator-index-kdzwc\" (UID: \"6a2cd3a2-57bb-4d30-8d10-666c40a8908d\") " pod="openstack-operators/mariadb-operator-index-kdzwc"
Jan 29 12:21:16 crc kubenswrapper[4753]: I0129 12:21:16.898718 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qwqcj\" (UniqueName: \"kubernetes.io/projected/6a2cd3a2-57bb-4d30-8d10-666c40a8908d-kube-api-access-qwqcj\") pod \"mariadb-operator-index-kdzwc\" (UID: \"6a2cd3a2-57bb-4d30-8d10-666c40a8908d\") " pod="openstack-operators/mariadb-operator-index-kdzwc"
Jan 29 12:21:16 crc kubenswrapper[4753]: I0129 12:21:16.963431 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-index-kdzwc"
Jan 29 12:21:17 crc kubenswrapper[4753]: I0129 12:21:17.089285 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/mariadb-operator-index-bd94q" podUID="64ccf1b0-5038-4d95-9934-530559f35f1d" containerName="registry-server" containerID="cri-o://abc190c35932947867dd7f75684232544fc9350346ea8dce845e4248d564361e" gracePeriod=2
Jan 29 12:21:17 crc kubenswrapper[4753]: I0129 12:21:17.432397 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-index-bd94q"
Jan 29 12:21:17 crc kubenswrapper[4753]: I0129 12:21:17.442107 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-index-kdzwc"]
Jan 29 12:21:17 crc kubenswrapper[4753]: I0129 12:21:17.579150 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fq69l\" (UniqueName: \"kubernetes.io/projected/64ccf1b0-5038-4d95-9934-530559f35f1d-kube-api-access-fq69l\") pod \"64ccf1b0-5038-4d95-9934-530559f35f1d\" (UID: \"64ccf1b0-5038-4d95-9934-530559f35f1d\") "
Jan 29 12:21:17 crc kubenswrapper[4753]: I0129 12:21:17.583582 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/64ccf1b0-5038-4d95-9934-530559f35f1d-kube-api-access-fq69l" (OuterVolumeSpecName: "kube-api-access-fq69l") pod "64ccf1b0-5038-4d95-9934-530559f35f1d" (UID: "64ccf1b0-5038-4d95-9934-530559f35f1d"). InnerVolumeSpecName "kube-api-access-fq69l". PluginName "kubernetes.io/projected", VolumeGidValue ""
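gracePeriod=2 in the "Killing container with a grace period" entry above means the runtime allows the registry-server container 2 seconds between the polite stop signal and a hard kill. The general shape of that pattern, sketched for an ordinary Unix process rather than CRI-O's actual implementation:

package main

import (
	"os/exec"
	"syscall"
	"time"
)

// stopWithGrace sends SIGTERM, waits up to grace for the process to exit,
// then falls back to SIGKILL, mirroring the container grace-period kill.
func stopWithGrace(cmd *exec.Cmd, grace time.Duration) {
	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()
	cmd.Process.Signal(syscall.SIGTERM)
	select {
	case <-done: // exited within the grace period
	case <-time.After(grace):
		cmd.Process.Kill() // SIGKILL
		<-done
	}
}

func main() {
	cmd := exec.Command("sleep", "60")
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	stopWithGrace(cmd, 2*time.Second)
}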
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:21:17 crc kubenswrapper[4753]: I0129 12:21:17.681320 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fq69l\" (UniqueName: \"kubernetes.io/projected/64ccf1b0-5038-4d95-9934-530559f35f1d-kube-api-access-fq69l\") on node \"crc\" DevicePath \"\"" Jan 29 12:21:18 crc kubenswrapper[4753]: I0129 12:21:18.141843 4753 generic.go:334] "Generic (PLEG): container finished" podID="64ccf1b0-5038-4d95-9934-530559f35f1d" containerID="abc190c35932947867dd7f75684232544fc9350346ea8dce845e4248d564361e" exitCode=0 Jan 29 12:21:18 crc kubenswrapper[4753]: I0129 12:21:18.142029 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-index-bd94q" Jan 29 12:21:18 crc kubenswrapper[4753]: I0129 12:21:18.142079 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-index-bd94q" event={"ID":"64ccf1b0-5038-4d95-9934-530559f35f1d","Type":"ContainerDied","Data":"abc190c35932947867dd7f75684232544fc9350346ea8dce845e4248d564361e"} Jan 29 12:21:18 crc kubenswrapper[4753]: I0129 12:21:18.142856 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-index-bd94q" event={"ID":"64ccf1b0-5038-4d95-9934-530559f35f1d","Type":"ContainerDied","Data":"9eaaeea778e668aaf3c6d7b6ce64278014c259e8ff027d39dde45c7600668db0"} Jan 29 12:21:18 crc kubenswrapper[4753]: I0129 12:21:18.142877 4753 scope.go:117] "RemoveContainer" containerID="abc190c35932947867dd7f75684232544fc9350346ea8dce845e4248d564361e" Jan 29 12:21:18 crc kubenswrapper[4753]: I0129 12:21:18.144043 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-index-kdzwc" event={"ID":"6a2cd3a2-57bb-4d30-8d10-666c40a8908d","Type":"ContainerStarted","Data":"df8d55fd68a861c4f7bbe554d9dbc06390c42dbcee96570ae7c9e27d36859f75"} Jan 29 12:21:18 crc kubenswrapper[4753]: I0129 12:21:18.162562 4753 scope.go:117] "RemoveContainer" containerID="abc190c35932947867dd7f75684232544fc9350346ea8dce845e4248d564361e" Jan 29 12:21:18 crc kubenswrapper[4753]: E0129 12:21:18.162970 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"abc190c35932947867dd7f75684232544fc9350346ea8dce845e4248d564361e\": container with ID starting with abc190c35932947867dd7f75684232544fc9350346ea8dce845e4248d564361e not found: ID does not exist" containerID="abc190c35932947867dd7f75684232544fc9350346ea8dce845e4248d564361e" Jan 29 12:21:18 crc kubenswrapper[4753]: I0129 12:21:18.163029 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"abc190c35932947867dd7f75684232544fc9350346ea8dce845e4248d564361e"} err="failed to get container status \"abc190c35932947867dd7f75684232544fc9350346ea8dce845e4248d564361e\": rpc error: code = NotFound desc = could not find container \"abc190c35932947867dd7f75684232544fc9350346ea8dce845e4248d564361e\": container with ID starting with abc190c35932947867dd7f75684232544fc9350346ea8dce845e4248d564361e not found: ID does not exist" Jan 29 12:21:18 crc kubenswrapper[4753]: I0129 12:21:18.169368 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/mariadb-operator-index-bd94q"] Jan 29 12:21:18 crc kubenswrapper[4753]: I0129 12:21:18.173660 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/mariadb-operator-index-bd94q"] Jan 29 12:21:19 crc 
Jan 29 12:21:19 crc kubenswrapper[4753]: I0129 12:21:19.172872 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-index-kdzwc" podStartSLOduration=2.683870309 podStartE2EDuration="3.172849445s" podCreationTimestamp="2026-01-29 12:21:16 +0000 UTC" firstStartedPulling="2026-01-29 12:21:17.46147317 +0000 UTC m=+891.713554625" lastFinishedPulling="2026-01-29 12:21:17.950452306 +0000 UTC m=+892.202533761" observedRunningTime="2026-01-29 12:21:19.171242962 +0000 UTC m=+893.423324417" watchObservedRunningTime="2026-01-29 12:21:19.172849445 +0000 UTC m=+893.424930910"
Jan 29 12:21:19 crc kubenswrapper[4753]: I0129 12:21:19.897894 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="64ccf1b0-5038-4d95-9934-530559f35f1d" path="/var/lib/kubelet/pods/64ccf1b0-5038-4d95-9934-530559f35f1d/volumes"
Jan 29 12:21:25 crc kubenswrapper[4753]: I0129 12:21:25.228100 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-z5drz"
Jan 29 12:21:26 crc kubenswrapper[4753]: I0129 12:21:26.964052 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-index-kdzwc"
Jan 29 12:21:26 crc kubenswrapper[4753]: I0129 12:21:26.964410 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/mariadb-operator-index-kdzwc"
Jan 29 12:21:26 crc kubenswrapper[4753]: I0129 12:21:26.998603 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/mariadb-operator-index-kdzwc"
Jan 29 12:21:27 crc kubenswrapper[4753]: I0129 12:21:27.230121 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-index-kdzwc"
Jan 29 12:21:28 crc kubenswrapper[4753]: I0129 12:21:28.071508 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f405mwqg"]
Jan 29 12:21:28 crc kubenswrapper[4753]: E0129 12:21:28.071772 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64ccf1b0-5038-4d95-9934-530559f35f1d" containerName="registry-server"
Jan 29 12:21:28 crc kubenswrapper[4753]: I0129 12:21:28.071784 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="64ccf1b0-5038-4d95-9934-530559f35f1d" containerName="registry-server"
Jan 29 12:21:28 crc kubenswrapper[4753]: I0129 12:21:28.071900 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="64ccf1b0-5038-4d95-9934-530559f35f1d" containerName="registry-server"
Jan 29 12:21:28 crc kubenswrapper[4753]: I0129 12:21:28.072689 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f405mwqg"
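The probe entries for mariadb-operator-index-kdzwc above show the usual ordering: the startup probe first reports unhealthy, flips to started a moment later, and only then does the readiness probe begin reporting (status "" then "ready"). A minimal poll-loop sketch of that gating, with hypothetical check functions standing in for the real probes:

package main

import (
	"fmt"
	"time"
)

func main() {
	polls := 0
	startupCheck := func() bool { polls++; return polls >= 2 } // hypothetical
	readinessCheck := func() bool { return true }              // hypothetical

	started := false
	for tick := 0; tick < 4; tick++ {
		if !started {
			// Readiness is not probed until the startup probe succeeds.
			if started = startupCheck(); started {
				fmt.Println(`startup probe: status "started"`)
			} else {
				fmt.Println(`startup probe: status "unhealthy"`)
			}
		} else if readinessCheck() {
			fmt.Println(`readiness probe: status "ready"`)
		}
		time.Sleep(10 * time.Millisecond) // stand-in for periodSeconds
	}
}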
Jan 29 12:21:28 crc kubenswrapper[4753]: I0129 12:21:28.074634 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-j2jmz"
Jan 29 12:21:28 crc kubenswrapper[4753]: I0129 12:21:28.082329 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e0312663-6b29-4d51-a91d-7e2caf2ef12d-util\") pod \"f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f405mwqg\" (UID: \"e0312663-6b29-4d51-a91d-7e2caf2ef12d\") " pod="openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f405mwqg"
Jan 29 12:21:28 crc kubenswrapper[4753]: I0129 12:21:28.082395 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tlsh8\" (UniqueName: \"kubernetes.io/projected/e0312663-6b29-4d51-a91d-7e2caf2ef12d-kube-api-access-tlsh8\") pod \"f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f405mwqg\" (UID: \"e0312663-6b29-4d51-a91d-7e2caf2ef12d\") " pod="openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f405mwqg"
Jan 29 12:21:28 crc kubenswrapper[4753]: I0129 12:21:28.082433 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e0312663-6b29-4d51-a91d-7e2caf2ef12d-bundle\") pod \"f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f405mwqg\" (UID: \"e0312663-6b29-4d51-a91d-7e2caf2ef12d\") " pod="openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f405mwqg"
Jan 29 12:21:28 crc kubenswrapper[4753]: I0129 12:21:28.082967 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f405mwqg"]
Jan 29 12:21:28 crc kubenswrapper[4753]: I0129 12:21:28.183773 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e0312663-6b29-4d51-a91d-7e2caf2ef12d-util\") pod \"f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f405mwqg\" (UID: \"e0312663-6b29-4d51-a91d-7e2caf2ef12d\") " pod="openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f405mwqg"
Jan 29 12:21:28 crc kubenswrapper[4753]: I0129 12:21:28.184020 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tlsh8\" (UniqueName: \"kubernetes.io/projected/e0312663-6b29-4d51-a91d-7e2caf2ef12d-kube-api-access-tlsh8\") pod \"f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f405mwqg\" (UID: \"e0312663-6b29-4d51-a91d-7e2caf2ef12d\") " pod="openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f405mwqg"
Jan 29 12:21:28 crc kubenswrapper[4753]: I0129 12:21:28.184058 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e0312663-6b29-4d51-a91d-7e2caf2ef12d-bundle\") pod \"f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f405mwqg\" (UID: \"e0312663-6b29-4d51-a91d-7e2caf2ef12d\") " pod="openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f405mwqg"
Jan 29 12:21:28 crc kubenswrapper[4753]: I0129 12:21:28.184461 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e0312663-6b29-4d51-a91d-7e2caf2ef12d-util\") pod \"f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f405mwqg\" (UID: \"e0312663-6b29-4d51-a91d-7e2caf2ef12d\") " pod="openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f405mwqg"
\"kubernetes.io/empty-dir/e0312663-6b29-4d51-a91d-7e2caf2ef12d-util\") pod \"f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f405mwqg\" (UID: \"e0312663-6b29-4d51-a91d-7e2caf2ef12d\") " pod="openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f405mwqg" Jan 29 12:21:28 crc kubenswrapper[4753]: I0129 12:21:28.184527 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e0312663-6b29-4d51-a91d-7e2caf2ef12d-bundle\") pod \"f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f405mwqg\" (UID: \"e0312663-6b29-4d51-a91d-7e2caf2ef12d\") " pod="openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f405mwqg" Jan 29 12:21:28 crc kubenswrapper[4753]: I0129 12:21:28.204029 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tlsh8\" (UniqueName: \"kubernetes.io/projected/e0312663-6b29-4d51-a91d-7e2caf2ef12d-kube-api-access-tlsh8\") pod \"f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f405mwqg\" (UID: \"e0312663-6b29-4d51-a91d-7e2caf2ef12d\") " pod="openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f405mwqg" Jan 29 12:21:28 crc kubenswrapper[4753]: I0129 12:21:28.408266 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-j2jmz" Jan 29 12:21:28 crc kubenswrapper[4753]: I0129 12:21:28.416876 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f405mwqg" Jan 29 12:21:28 crc kubenswrapper[4753]: I0129 12:21:28.653479 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f405mwqg"] Jan 29 12:21:29 crc kubenswrapper[4753]: I0129 12:21:29.218740 4753 generic.go:334] "Generic (PLEG): container finished" podID="e0312663-6b29-4d51-a91d-7e2caf2ef12d" containerID="3f73ff64726c4e416c38dc71317be25c33816fc79a84cb44487910144496b139" exitCode=0 Jan 29 12:21:29 crc kubenswrapper[4753]: I0129 12:21:29.218802 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f405mwqg" event={"ID":"e0312663-6b29-4d51-a91d-7e2caf2ef12d","Type":"ContainerDied","Data":"3f73ff64726c4e416c38dc71317be25c33816fc79a84cb44487910144496b139"} Jan 29 12:21:29 crc kubenswrapper[4753]: I0129 12:21:29.218844 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f405mwqg" event={"ID":"e0312663-6b29-4d51-a91d-7e2caf2ef12d","Type":"ContainerStarted","Data":"717a1e5aade6e65d2dc656e6632406740e18d97869ccc07170a685bae6bb0ca0"} Jan 29 12:21:31 crc kubenswrapper[4753]: I0129 12:21:31.232677 4753 generic.go:334] "Generic (PLEG): container finished" podID="e0312663-6b29-4d51-a91d-7e2caf2ef12d" containerID="996f3aece49f84b40de1877083ab8f46266c6ebe2de86803b43fa754ebc52b06" exitCode=0 Jan 29 12:21:31 crc kubenswrapper[4753]: I0129 12:21:31.232807 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f405mwqg" event={"ID":"e0312663-6b29-4d51-a91d-7e2caf2ef12d","Type":"ContainerDied","Data":"996f3aece49f84b40de1877083ab8f46266c6ebe2de86803b43fa754ebc52b06"} Jan 29 12:21:32 crc kubenswrapper[4753]: I0129 12:21:32.240922 4753 generic.go:334] "Generic (PLEG): container finished" 
podID="e0312663-6b29-4d51-a91d-7e2caf2ef12d" containerID="61476c9e7ca135f2fe3fc971fa84cc1982d27a72d7c6e6dd268cf602b83b81b0" exitCode=0 Jan 29 12:21:32 crc kubenswrapper[4753]: I0129 12:21:32.241123 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f405mwqg" event={"ID":"e0312663-6b29-4d51-a91d-7e2caf2ef12d","Type":"ContainerDied","Data":"61476c9e7ca135f2fe3fc971fa84cc1982d27a72d7c6e6dd268cf602b83b81b0"} Jan 29 12:21:33 crc kubenswrapper[4753]: I0129 12:21:33.572223 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f405mwqg" Jan 29 12:21:33 crc kubenswrapper[4753]: I0129 12:21:33.662588 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tlsh8\" (UniqueName: \"kubernetes.io/projected/e0312663-6b29-4d51-a91d-7e2caf2ef12d-kube-api-access-tlsh8\") pod \"e0312663-6b29-4d51-a91d-7e2caf2ef12d\" (UID: \"e0312663-6b29-4d51-a91d-7e2caf2ef12d\") " Jan 29 12:21:33 crc kubenswrapper[4753]: I0129 12:21:33.662787 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e0312663-6b29-4d51-a91d-7e2caf2ef12d-util\") pod \"e0312663-6b29-4d51-a91d-7e2caf2ef12d\" (UID: \"e0312663-6b29-4d51-a91d-7e2caf2ef12d\") " Jan 29 12:21:33 crc kubenswrapper[4753]: I0129 12:21:33.662860 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e0312663-6b29-4d51-a91d-7e2caf2ef12d-bundle\") pod \"e0312663-6b29-4d51-a91d-7e2caf2ef12d\" (UID: \"e0312663-6b29-4d51-a91d-7e2caf2ef12d\") " Jan 29 12:21:33 crc kubenswrapper[4753]: I0129 12:21:33.665321 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e0312663-6b29-4d51-a91d-7e2caf2ef12d-bundle" (OuterVolumeSpecName: "bundle") pod "e0312663-6b29-4d51-a91d-7e2caf2ef12d" (UID: "e0312663-6b29-4d51-a91d-7e2caf2ef12d"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:21:33 crc kubenswrapper[4753]: I0129 12:21:33.670857 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e0312663-6b29-4d51-a91d-7e2caf2ef12d-kube-api-access-tlsh8" (OuterVolumeSpecName: "kube-api-access-tlsh8") pod "e0312663-6b29-4d51-a91d-7e2caf2ef12d" (UID: "e0312663-6b29-4d51-a91d-7e2caf2ef12d"). InnerVolumeSpecName "kube-api-access-tlsh8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:21:33 crc kubenswrapper[4753]: I0129 12:21:33.678756 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e0312663-6b29-4d51-a91d-7e2caf2ef12d-util" (OuterVolumeSpecName: "util") pod "e0312663-6b29-4d51-a91d-7e2caf2ef12d" (UID: "e0312663-6b29-4d51-a91d-7e2caf2ef12d"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:21:33 crc kubenswrapper[4753]: I0129 12:21:33.764616 4753 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e0312663-6b29-4d51-a91d-7e2caf2ef12d-util\") on node \"crc\" DevicePath \"\"" Jan 29 12:21:33 crc kubenswrapper[4753]: I0129 12:21:33.764678 4753 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e0312663-6b29-4d51-a91d-7e2caf2ef12d-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:21:33 crc kubenswrapper[4753]: I0129 12:21:33.764698 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tlsh8\" (UniqueName: \"kubernetes.io/projected/e0312663-6b29-4d51-a91d-7e2caf2ef12d-kube-api-access-tlsh8\") on node \"crc\" DevicePath \"\"" Jan 29 12:21:34 crc kubenswrapper[4753]: I0129 12:21:34.256051 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f405mwqg" event={"ID":"e0312663-6b29-4d51-a91d-7e2caf2ef12d","Type":"ContainerDied","Data":"717a1e5aade6e65d2dc656e6632406740e18d97869ccc07170a685bae6bb0ca0"} Jan 29 12:21:34 crc kubenswrapper[4753]: I0129 12:21:34.256138 4753 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="717a1e5aade6e65d2dc656e6632406740e18d97869ccc07170a685bae6bb0ca0" Jan 29 12:21:34 crc kubenswrapper[4753]: I0129 12:21:34.256139 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f405mwqg" Jan 29 12:21:34 crc kubenswrapper[4753]: I0129 12:21:34.488184 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-5z7kh"] Jan 29 12:21:34 crc kubenswrapper[4753]: E0129 12:21:34.488631 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0312663-6b29-4d51-a91d-7e2caf2ef12d" containerName="extract" Jan 29 12:21:34 crc kubenswrapper[4753]: I0129 12:21:34.488667 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0312663-6b29-4d51-a91d-7e2caf2ef12d" containerName="extract" Jan 29 12:21:34 crc kubenswrapper[4753]: E0129 12:21:34.488691 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0312663-6b29-4d51-a91d-7e2caf2ef12d" containerName="pull" Jan 29 12:21:34 crc kubenswrapper[4753]: I0129 12:21:34.488697 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0312663-6b29-4d51-a91d-7e2caf2ef12d" containerName="pull" Jan 29 12:21:34 crc kubenswrapper[4753]: E0129 12:21:34.488709 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0312663-6b29-4d51-a91d-7e2caf2ef12d" containerName="util" Jan 29 12:21:34 crc kubenswrapper[4753]: I0129 12:21:34.488716 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0312663-6b29-4d51-a91d-7e2caf2ef12d" containerName="util" Jan 29 12:21:34 crc kubenswrapper[4753]: I0129 12:21:34.488887 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="e0312663-6b29-4d51-a91d-7e2caf2ef12d" containerName="extract" Jan 29 12:21:34 crc kubenswrapper[4753]: I0129 12:21:34.497281 4753 util.go:30] "No sandbox for pod can be found. 
Jan 29 12:21:34 crc kubenswrapper[4753]: I0129 12:21:34.501731 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-5z7kh"]
Jan 29 12:21:34 crc kubenswrapper[4753]: I0129 12:21:34.665601 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jgx6j\" (UniqueName: \"kubernetes.io/projected/f2676362-6557-45f7-be21-fd3678472677-kube-api-access-jgx6j\") pod \"redhat-marketplace-5z7kh\" (UID: \"f2676362-6557-45f7-be21-fd3678472677\") " pod="openshift-marketplace/redhat-marketplace-5z7kh"
Jan 29 12:21:34 crc kubenswrapper[4753]: I0129 12:21:34.665821 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f2676362-6557-45f7-be21-fd3678472677-catalog-content\") pod \"redhat-marketplace-5z7kh\" (UID: \"f2676362-6557-45f7-be21-fd3678472677\") " pod="openshift-marketplace/redhat-marketplace-5z7kh"
Jan 29 12:21:34 crc kubenswrapper[4753]: I0129 12:21:34.665848 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f2676362-6557-45f7-be21-fd3678472677-utilities\") pod \"redhat-marketplace-5z7kh\" (UID: \"f2676362-6557-45f7-be21-fd3678472677\") " pod="openshift-marketplace/redhat-marketplace-5z7kh"
Jan 29 12:21:34 crc kubenswrapper[4753]: I0129 12:21:34.766867 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f2676362-6557-45f7-be21-fd3678472677-catalog-content\") pod \"redhat-marketplace-5z7kh\" (UID: \"f2676362-6557-45f7-be21-fd3678472677\") " pod="openshift-marketplace/redhat-marketplace-5z7kh"
Jan 29 12:21:34 crc kubenswrapper[4753]: I0129 12:21:34.767217 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f2676362-6557-45f7-be21-fd3678472677-utilities\") pod \"redhat-marketplace-5z7kh\" (UID: \"f2676362-6557-45f7-be21-fd3678472677\") " pod="openshift-marketplace/redhat-marketplace-5z7kh"
Jan 29 12:21:34 crc kubenswrapper[4753]: I0129 12:21:34.767359 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jgx6j\" (UniqueName: \"kubernetes.io/projected/f2676362-6557-45f7-be21-fd3678472677-kube-api-access-jgx6j\") pod \"redhat-marketplace-5z7kh\" (UID: \"f2676362-6557-45f7-be21-fd3678472677\") " pod="openshift-marketplace/redhat-marketplace-5z7kh"
Jan 29 12:21:34 crc kubenswrapper[4753]: I0129 12:21:34.767853 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f2676362-6557-45f7-be21-fd3678472677-catalog-content\") pod \"redhat-marketplace-5z7kh\" (UID: \"f2676362-6557-45f7-be21-fd3678472677\") " pod="openshift-marketplace/redhat-marketplace-5z7kh"
Jan 29 12:21:34 crc kubenswrapper[4753]: I0129 12:21:34.767955 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f2676362-6557-45f7-be21-fd3678472677-utilities\") pod \"redhat-marketplace-5z7kh\" (UID: \"f2676362-6557-45f7-be21-fd3678472677\") " pod="openshift-marketplace/redhat-marketplace-5z7kh"
Jan 29 12:21:34 crc kubenswrapper[4753]: I0129 12:21:34.791788 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jgx6j\" (UniqueName: \"kubernetes.io/projected/f2676362-6557-45f7-be21-fd3678472677-kube-api-access-jgx6j\") pod \"redhat-marketplace-5z7kh\" (UID: \"f2676362-6557-45f7-be21-fd3678472677\") " pod="openshift-marketplace/redhat-marketplace-5z7kh"
succeeded for volume \"kube-api-access-jgx6j\" (UniqueName: \"kubernetes.io/projected/f2676362-6557-45f7-be21-fd3678472677-kube-api-access-jgx6j\") pod \"redhat-marketplace-5z7kh\" (UID: \"f2676362-6557-45f7-be21-fd3678472677\") " pod="openshift-marketplace/redhat-marketplace-5z7kh" Jan 29 12:21:34 crc kubenswrapper[4753]: I0129 12:21:34.811861 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5z7kh" Jan 29 12:21:35 crc kubenswrapper[4753]: I0129 12:21:35.318815 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-5z7kh"] Jan 29 12:21:36 crc kubenswrapper[4753]: I0129 12:21:36.269809 4753 generic.go:334] "Generic (PLEG): container finished" podID="f2676362-6557-45f7-be21-fd3678472677" containerID="68a83bce9d7d619be70da7b52d056bb5292057f9e6b3b246b7316cf5d9574ba8" exitCode=0 Jan 29 12:21:36 crc kubenswrapper[4753]: I0129 12:21:36.269869 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5z7kh" event={"ID":"f2676362-6557-45f7-be21-fd3678472677","Type":"ContainerDied","Data":"68a83bce9d7d619be70da7b52d056bb5292057f9e6b3b246b7316cf5d9574ba8"} Jan 29 12:21:36 crc kubenswrapper[4753]: I0129 12:21:36.269908 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5z7kh" event={"ID":"f2676362-6557-45f7-be21-fd3678472677","Type":"ContainerStarted","Data":"4274b71764ee20b6d890758140ad3bc49cd3445e083b14e06ad3c1060f5dbbff"} Jan 29 12:21:38 crc kubenswrapper[4753]: I0129 12:21:38.283814 4753 generic.go:334] "Generic (PLEG): container finished" podID="f2676362-6557-45f7-be21-fd3678472677" containerID="2dfe020cb646128123ed019a7a9b58a6faaa4971157e8ede6ef15e267afa388b" exitCode=0 Jan 29 12:21:38 crc kubenswrapper[4753]: I0129 12:21:38.283876 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5z7kh" event={"ID":"f2676362-6557-45f7-be21-fd3678472677","Type":"ContainerDied","Data":"2dfe020cb646128123ed019a7a9b58a6faaa4971157e8ede6ef15e267afa388b"} Jan 29 12:21:39 crc kubenswrapper[4753]: I0129 12:21:39.322379 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5z7kh" event={"ID":"f2676362-6557-45f7-be21-fd3678472677","Type":"ContainerStarted","Data":"7568ffa25cddc560243813a2d84d7150d7d1882d0af8f879ff420480f5540358"} Jan 29 12:21:39 crc kubenswrapper[4753]: I0129 12:21:39.354649 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-5z7kh" podStartSLOduration=2.923578288 podStartE2EDuration="5.354612986s" podCreationTimestamp="2026-01-29 12:21:34 +0000 UTC" firstStartedPulling="2026-01-29 12:21:36.27200961 +0000 UTC m=+910.524091065" lastFinishedPulling="2026-01-29 12:21:38.703044308 +0000 UTC m=+912.955125763" observedRunningTime="2026-01-29 12:21:39.350057377 +0000 UTC m=+913.602138832" watchObservedRunningTime="2026-01-29 12:21:39.354612986 +0000 UTC m=+913.606694441" Jan 29 12:21:40 crc kubenswrapper[4753]: I0129 12:21:40.698083 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-8d6c56984-hp786"] Jan 29 12:21:40 crc kubenswrapper[4753]: I0129 12:21:40.699472 4753 util.go:30] "No sandbox for pod can be found. 
Jan 29 12:21:40 crc kubenswrapper[4753]: I0129 12:21:40.703158 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert"
Jan 29 12:21:40 crc kubenswrapper[4753]: I0129 12:21:40.703663 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-service-cert"
Jan 29 12:21:40 crc kubenswrapper[4753]: I0129 12:21:40.704153 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-dj96f"
Jan 29 12:21:40 crc kubenswrapper[4753]: I0129 12:21:40.757275 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-8d6c56984-hp786"]
Jan 29 12:21:40 crc kubenswrapper[4753]: I0129 12:21:40.835950 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g9dsc\" (UniqueName: \"kubernetes.io/projected/0a802cf6-528b-4923-baa7-a3eb25bf9bad-kube-api-access-g9dsc\") pod \"mariadb-operator-controller-manager-8d6c56984-hp786\" (UID: \"0a802cf6-528b-4923-baa7-a3eb25bf9bad\") " pod="openstack-operators/mariadb-operator-controller-manager-8d6c56984-hp786"
Jan 29 12:21:40 crc kubenswrapper[4753]: I0129 12:21:40.836085 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/0a802cf6-528b-4923-baa7-a3eb25bf9bad-apiservice-cert\") pod \"mariadb-operator-controller-manager-8d6c56984-hp786\" (UID: \"0a802cf6-528b-4923-baa7-a3eb25bf9bad\") " pod="openstack-operators/mariadb-operator-controller-manager-8d6c56984-hp786"
Jan 29 12:21:40 crc kubenswrapper[4753]: I0129 12:21:40.836104 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/0a802cf6-528b-4923-baa7-a3eb25bf9bad-webhook-cert\") pod \"mariadb-operator-controller-manager-8d6c56984-hp786\" (UID: \"0a802cf6-528b-4923-baa7-a3eb25bf9bad\") " pod="openstack-operators/mariadb-operator-controller-manager-8d6c56984-hp786"
Jan 29 12:21:40 crc kubenswrapper[4753]: I0129 12:21:40.937768 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/0a802cf6-528b-4923-baa7-a3eb25bf9bad-apiservice-cert\") pod \"mariadb-operator-controller-manager-8d6c56984-hp786\" (UID: \"0a802cf6-528b-4923-baa7-a3eb25bf9bad\") " pod="openstack-operators/mariadb-operator-controller-manager-8d6c56984-hp786"
Jan 29 12:21:40 crc kubenswrapper[4753]: I0129 12:21:40.937837 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/0a802cf6-528b-4923-baa7-a3eb25bf9bad-webhook-cert\") pod \"mariadb-operator-controller-manager-8d6c56984-hp786\" (UID: \"0a802cf6-528b-4923-baa7-a3eb25bf9bad\") " pod="openstack-operators/mariadb-operator-controller-manager-8d6c56984-hp786"
Jan 29 12:21:40 crc kubenswrapper[4753]: I0129 12:21:40.937870 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g9dsc\" (UniqueName: \"kubernetes.io/projected/0a802cf6-528b-4923-baa7-a3eb25bf9bad-kube-api-access-g9dsc\") pod \"mariadb-operator-controller-manager-8d6c56984-hp786\" (UID: \"0a802cf6-528b-4923-baa7-a3eb25bf9bad\") " pod="openstack-operators/mariadb-operator-controller-manager-8d6c56984-hp786"
pod="openstack-operators/mariadb-operator-controller-manager-8d6c56984-hp786" Jan 29 12:21:40 crc kubenswrapper[4753]: I0129 12:21:40.955130 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/0a802cf6-528b-4923-baa7-a3eb25bf9bad-apiservice-cert\") pod \"mariadb-operator-controller-manager-8d6c56984-hp786\" (UID: \"0a802cf6-528b-4923-baa7-a3eb25bf9bad\") " pod="openstack-operators/mariadb-operator-controller-manager-8d6c56984-hp786" Jan 29 12:21:40 crc kubenswrapper[4753]: I0129 12:21:40.955629 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/0a802cf6-528b-4923-baa7-a3eb25bf9bad-webhook-cert\") pod \"mariadb-operator-controller-manager-8d6c56984-hp786\" (UID: \"0a802cf6-528b-4923-baa7-a3eb25bf9bad\") " pod="openstack-operators/mariadb-operator-controller-manager-8d6c56984-hp786" Jan 29 12:21:40 crc kubenswrapper[4753]: I0129 12:21:40.980405 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g9dsc\" (UniqueName: \"kubernetes.io/projected/0a802cf6-528b-4923-baa7-a3eb25bf9bad-kube-api-access-g9dsc\") pod \"mariadb-operator-controller-manager-8d6c56984-hp786\" (UID: \"0a802cf6-528b-4923-baa7-a3eb25bf9bad\") " pod="openstack-operators/mariadb-operator-controller-manager-8d6c56984-hp786" Jan 29 12:21:41 crc kubenswrapper[4753]: I0129 12:21:41.019621 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-8d6c56984-hp786" Jan 29 12:21:41 crc kubenswrapper[4753]: I0129 12:21:41.465957 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-8d6c56984-hp786"] Jan 29 12:21:41 crc kubenswrapper[4753]: W0129 12:21:41.475115 4753 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0a802cf6_528b_4923_baa7_a3eb25bf9bad.slice/crio-452cf0440cebcc3f572a85848234b39d64887d24c912979521258cc0c0233867 WatchSource:0}: Error finding container 452cf0440cebcc3f572a85848234b39d64887d24c912979521258cc0c0233867: Status 404 returned error can't find the container with id 452cf0440cebcc3f572a85848234b39d64887d24c912979521258cc0c0233867 Jan 29 12:21:42 crc kubenswrapper[4753]: I0129 12:21:42.351687 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-8d6c56984-hp786" event={"ID":"0a802cf6-528b-4923-baa7-a3eb25bf9bad","Type":"ContainerStarted","Data":"452cf0440cebcc3f572a85848234b39d64887d24c912979521258cc0c0233867"} Jan 29 12:21:44 crc kubenswrapper[4753]: I0129 12:21:44.935984 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-5z7kh" Jan 29 12:21:44 crc kubenswrapper[4753]: I0129 12:21:44.936035 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-5z7kh" Jan 29 12:21:44 crc kubenswrapper[4753]: I0129 12:21:44.985191 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-5z7kh" Jan 29 12:21:45 crc kubenswrapper[4753]: I0129 12:21:45.419028 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-5z7kh" Jan 29 12:21:46 crc kubenswrapper[4753]: I0129 12:21:46.401279 4753 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-8d6c56984-hp786" event={"ID":"0a802cf6-528b-4923-baa7-a3eb25bf9bad","Type":"ContainerStarted","Data":"f0979a0f20cf1dd20af8d2287b74ef55439e85192334188602ab9c867f565bc1"} Jan 29 12:21:46 crc kubenswrapper[4753]: I0129 12:21:46.401727 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-8d6c56984-hp786" Jan 29 12:21:46 crc kubenswrapper[4753]: I0129 12:21:46.434529 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-8d6c56984-hp786" podStartSLOduration=2.454512694 podStartE2EDuration="6.434502672s" podCreationTimestamp="2026-01-29 12:21:40 +0000 UTC" firstStartedPulling="2026-01-29 12:21:41.478719651 +0000 UTC m=+915.730801106" lastFinishedPulling="2026-01-29 12:21:45.458709629 +0000 UTC m=+919.710791084" observedRunningTime="2026-01-29 12:21:46.430729316 +0000 UTC m=+920.682810761" watchObservedRunningTime="2026-01-29 12:21:46.434502672 +0000 UTC m=+920.686584127" Jan 29 12:21:47 crc kubenswrapper[4753]: I0129 12:21:47.237611 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-5z7kh"] Jan 29 12:21:47 crc kubenswrapper[4753]: I0129 12:21:47.407321 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-5z7kh" podUID="f2676362-6557-45f7-be21-fd3678472677" containerName="registry-server" containerID="cri-o://7568ffa25cddc560243813a2d84d7150d7d1882d0af8f879ff420480f5540358" gracePeriod=2 Jan 29 12:21:48 crc kubenswrapper[4753]: I0129 12:21:48.049947 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5z7kh" Jan 29 12:21:48 crc kubenswrapper[4753]: I0129 12:21:48.147655 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f2676362-6557-45f7-be21-fd3678472677-utilities\") pod \"f2676362-6557-45f7-be21-fd3678472677\" (UID: \"f2676362-6557-45f7-be21-fd3678472677\") " Jan 29 12:21:48 crc kubenswrapper[4753]: I0129 12:21:48.147739 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jgx6j\" (UniqueName: \"kubernetes.io/projected/f2676362-6557-45f7-be21-fd3678472677-kube-api-access-jgx6j\") pod \"f2676362-6557-45f7-be21-fd3678472677\" (UID: \"f2676362-6557-45f7-be21-fd3678472677\") " Jan 29 12:21:48 crc kubenswrapper[4753]: I0129 12:21:48.147785 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f2676362-6557-45f7-be21-fd3678472677-catalog-content\") pod \"f2676362-6557-45f7-be21-fd3678472677\" (UID: \"f2676362-6557-45f7-be21-fd3678472677\") " Jan 29 12:21:48 crc kubenswrapper[4753]: I0129 12:21:48.148907 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f2676362-6557-45f7-be21-fd3678472677-utilities" (OuterVolumeSpecName: "utilities") pod "f2676362-6557-45f7-be21-fd3678472677" (UID: "f2676362-6557-45f7-be21-fd3678472677"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:21:48 crc kubenswrapper[4753]: I0129 12:21:48.168685 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f2676362-6557-45f7-be21-fd3678472677-kube-api-access-jgx6j" (OuterVolumeSpecName: "kube-api-access-jgx6j") pod "f2676362-6557-45f7-be21-fd3678472677" (UID: "f2676362-6557-45f7-be21-fd3678472677"). InnerVolumeSpecName "kube-api-access-jgx6j". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:21:48 crc kubenswrapper[4753]: I0129 12:21:48.176421 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f2676362-6557-45f7-be21-fd3678472677-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f2676362-6557-45f7-be21-fd3678472677" (UID: "f2676362-6557-45f7-be21-fd3678472677"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:21:48 crc kubenswrapper[4753]: I0129 12:21:48.248741 4753 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f2676362-6557-45f7-be21-fd3678472677-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 12:21:48 crc kubenswrapper[4753]: I0129 12:21:48.249038 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jgx6j\" (UniqueName: \"kubernetes.io/projected/f2676362-6557-45f7-be21-fd3678472677-kube-api-access-jgx6j\") on node \"crc\" DevicePath \"\"" Jan 29 12:21:48 crc kubenswrapper[4753]: I0129 12:21:48.249104 4753 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f2676362-6557-45f7-be21-fd3678472677-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 12:21:48 crc kubenswrapper[4753]: I0129 12:21:48.417746 4753 generic.go:334] "Generic (PLEG): container finished" podID="f2676362-6557-45f7-be21-fd3678472677" containerID="7568ffa25cddc560243813a2d84d7150d7d1882d0af8f879ff420480f5540358" exitCode=0 Jan 29 12:21:48 crc kubenswrapper[4753]: I0129 12:21:48.417788 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5z7kh" event={"ID":"f2676362-6557-45f7-be21-fd3678472677","Type":"ContainerDied","Data":"7568ffa25cddc560243813a2d84d7150d7d1882d0af8f879ff420480f5540358"} Jan 29 12:21:48 crc kubenswrapper[4753]: I0129 12:21:48.417869 4753 util.go:48] "No ready sandbox for pod can be found. 
Jan 29 12:21:48 crc kubenswrapper[4753]: I0129 12:21:48.418076 4753 scope.go:117] "RemoveContainer" containerID="7568ffa25cddc560243813a2d84d7150d7d1882d0af8f879ff420480f5540358"
Jan 29 12:21:48 crc kubenswrapper[4753]: I0129 12:21:48.418039 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5z7kh" event={"ID":"f2676362-6557-45f7-be21-fd3678472677","Type":"ContainerDied","Data":"4274b71764ee20b6d890758140ad3bc49cd3445e083b14e06ad3c1060f5dbbff"}
Jan 29 12:21:48 crc kubenswrapper[4753]: I0129 12:21:48.436795 4753 scope.go:117] "RemoveContainer" containerID="2dfe020cb646128123ed019a7a9b58a6faaa4971157e8ede6ef15e267afa388b"
Jan 29 12:21:48 crc kubenswrapper[4753]: I0129 12:21:48.454552 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-5z7kh"]
Jan 29 12:21:48 crc kubenswrapper[4753]: I0129 12:21:48.462334 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-5z7kh"]
Jan 29 12:21:48 crc kubenswrapper[4753]: I0129 12:21:48.470595 4753 scope.go:117] "RemoveContainer" containerID="68a83bce9d7d619be70da7b52d056bb5292057f9e6b3b246b7316cf5d9574ba8"
Jan 29 12:21:48 crc kubenswrapper[4753]: I0129 12:21:48.487997 4753 scope.go:117] "RemoveContainer" containerID="7568ffa25cddc560243813a2d84d7150d7d1882d0af8f879ff420480f5540358"
Jan 29 12:21:48 crc kubenswrapper[4753]: E0129 12:21:48.488695 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7568ffa25cddc560243813a2d84d7150d7d1882d0af8f879ff420480f5540358\": container with ID starting with 7568ffa25cddc560243813a2d84d7150d7d1882d0af8f879ff420480f5540358 not found: ID does not exist" containerID="7568ffa25cddc560243813a2d84d7150d7d1882d0af8f879ff420480f5540358"
Jan 29 12:21:48 crc kubenswrapper[4753]: I0129 12:21:48.488744 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7568ffa25cddc560243813a2d84d7150d7d1882d0af8f879ff420480f5540358"} err="failed to get container status \"7568ffa25cddc560243813a2d84d7150d7d1882d0af8f879ff420480f5540358\": rpc error: code = NotFound desc = could not find container \"7568ffa25cddc560243813a2d84d7150d7d1882d0af8f879ff420480f5540358\": container with ID starting with 7568ffa25cddc560243813a2d84d7150d7d1882d0af8f879ff420480f5540358 not found: ID does not exist"
Jan 29 12:21:48 crc kubenswrapper[4753]: I0129 12:21:48.488778 4753 scope.go:117] "RemoveContainer" containerID="2dfe020cb646128123ed019a7a9b58a6faaa4971157e8ede6ef15e267afa388b"
Jan 29 12:21:48 crc kubenswrapper[4753]: E0129 12:21:48.489189 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2dfe020cb646128123ed019a7a9b58a6faaa4971157e8ede6ef15e267afa388b\": container with ID starting with 2dfe020cb646128123ed019a7a9b58a6faaa4971157e8ede6ef15e267afa388b not found: ID does not exist" containerID="2dfe020cb646128123ed019a7a9b58a6faaa4971157e8ede6ef15e267afa388b"
Jan 29 12:21:48 crc kubenswrapper[4753]: I0129 12:21:48.489213 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2dfe020cb646128123ed019a7a9b58a6faaa4971157e8ede6ef15e267afa388b"} err="failed to get container status \"2dfe020cb646128123ed019a7a9b58a6faaa4971157e8ede6ef15e267afa388b\": rpc error: code = NotFound desc = could not find container \"2dfe020cb646128123ed019a7a9b58a6faaa4971157e8ede6ef15e267afa388b\": container with ID starting with 2dfe020cb646128123ed019a7a9b58a6faaa4971157e8ede6ef15e267afa388b not found: ID does not exist"
container \"2dfe020cb646128123ed019a7a9b58a6faaa4971157e8ede6ef15e267afa388b\": container with ID starting with 2dfe020cb646128123ed019a7a9b58a6faaa4971157e8ede6ef15e267afa388b not found: ID does not exist" Jan 29 12:21:48 crc kubenswrapper[4753]: I0129 12:21:48.489307 4753 scope.go:117] "RemoveContainer" containerID="68a83bce9d7d619be70da7b52d056bb5292057f9e6b3b246b7316cf5d9574ba8" Jan 29 12:21:48 crc kubenswrapper[4753]: E0129 12:21:48.489545 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"68a83bce9d7d619be70da7b52d056bb5292057f9e6b3b246b7316cf5d9574ba8\": container with ID starting with 68a83bce9d7d619be70da7b52d056bb5292057f9e6b3b246b7316cf5d9574ba8 not found: ID does not exist" containerID="68a83bce9d7d619be70da7b52d056bb5292057f9e6b3b246b7316cf5d9574ba8" Jan 29 12:21:48 crc kubenswrapper[4753]: I0129 12:21:48.489568 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"68a83bce9d7d619be70da7b52d056bb5292057f9e6b3b246b7316cf5d9574ba8"} err="failed to get container status \"68a83bce9d7d619be70da7b52d056bb5292057f9e6b3b246b7316cf5d9574ba8\": rpc error: code = NotFound desc = could not find container \"68a83bce9d7d619be70da7b52d056bb5292057f9e6b3b246b7316cf5d9574ba8\": container with ID starting with 68a83bce9d7d619be70da7b52d056bb5292057f9e6b3b246b7316cf5d9574ba8 not found: ID does not exist" Jan 29 12:21:49 crc kubenswrapper[4753]: I0129 12:21:49.896090 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f2676362-6557-45f7-be21-fd3678472677" path="/var/lib/kubelet/pods/f2676362-6557-45f7-be21-fd3678472677/volumes" Jan 29 12:21:51 crc kubenswrapper[4753]: I0129 12:21:51.023715 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-8d6c56984-hp786" Jan 29 12:21:55 crc kubenswrapper[4753]: I0129 12:21:55.237710 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-index-hp9dh"] Jan 29 12:21:55 crc kubenswrapper[4753]: E0129 12:21:55.238387 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f2676362-6557-45f7-be21-fd3678472677" containerName="registry-server" Jan 29 12:21:55 crc kubenswrapper[4753]: I0129 12:21:55.238400 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="f2676362-6557-45f7-be21-fd3678472677" containerName="registry-server" Jan 29 12:21:55 crc kubenswrapper[4753]: E0129 12:21:55.238412 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f2676362-6557-45f7-be21-fd3678472677" containerName="extract-content" Jan 29 12:21:55 crc kubenswrapper[4753]: I0129 12:21:55.238418 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="f2676362-6557-45f7-be21-fd3678472677" containerName="extract-content" Jan 29 12:21:55 crc kubenswrapper[4753]: E0129 12:21:55.238426 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f2676362-6557-45f7-be21-fd3678472677" containerName="extract-utilities" Jan 29 12:21:55 crc kubenswrapper[4753]: I0129 12:21:55.238432 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="f2676362-6557-45f7-be21-fd3678472677" containerName="extract-utilities" Jan 29 12:21:55 crc kubenswrapper[4753]: I0129 12:21:55.238534 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="f2676362-6557-45f7-be21-fd3678472677" containerName="registry-server" Jan 29 12:21:55 crc kubenswrapper[4753]: I0129 12:21:55.238974 4753 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-index-hp9dh" Jan 29 12:21:55 crc kubenswrapper[4753]: I0129 12:21:55.249898 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-index-dockercfg-2btjl" Jan 29 12:21:55 crc kubenswrapper[4753]: I0129 12:21:55.262753 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-index-hp9dh"] Jan 29 12:21:55 crc kubenswrapper[4753]: I0129 12:21:55.413010 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8rwql\" (UniqueName: \"kubernetes.io/projected/bca6c033-57b7-45c7-9240-9174503817e7-kube-api-access-8rwql\") pod \"infra-operator-index-hp9dh\" (UID: \"bca6c033-57b7-45c7-9240-9174503817e7\") " pod="openstack-operators/infra-operator-index-hp9dh" Jan 29 12:21:55 crc kubenswrapper[4753]: I0129 12:21:55.709969 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8rwql\" (UniqueName: \"kubernetes.io/projected/bca6c033-57b7-45c7-9240-9174503817e7-kube-api-access-8rwql\") pod \"infra-operator-index-hp9dh\" (UID: \"bca6c033-57b7-45c7-9240-9174503817e7\") " pod="openstack-operators/infra-operator-index-hp9dh" Jan 29 12:21:55 crc kubenswrapper[4753]: I0129 12:21:55.737054 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8rwql\" (UniqueName: \"kubernetes.io/projected/bca6c033-57b7-45c7-9240-9174503817e7-kube-api-access-8rwql\") pod \"infra-operator-index-hp9dh\" (UID: \"bca6c033-57b7-45c7-9240-9174503817e7\") " pod="openstack-operators/infra-operator-index-hp9dh" Jan 29 12:21:55 crc kubenswrapper[4753]: I0129 12:21:55.875989 4753 util.go:30] "No sandbox for pod can be found. 
Jan 29 12:21:56 crc kubenswrapper[4753]: I0129 12:21:56.577304 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-index-hp9dh"]
Jan 29 12:21:57 crc kubenswrapper[4753]: I0129 12:21:57.555292 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-index-hp9dh" event={"ID":"bca6c033-57b7-45c7-9240-9174503817e7","Type":"ContainerStarted","Data":"42c6e18eeb60c8653df1689e965e311cfddb1522b68187c01fd94ed2642a4273"}
Jan 29 12:21:58 crc kubenswrapper[4753]: I0129 12:21:58.101520 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/infra-operator-index-hp9dh"]
Jan 29 12:21:58 crc kubenswrapper[4753]: I0129 12:21:58.563730 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-index-hp9dh" event={"ID":"bca6c033-57b7-45c7-9240-9174503817e7","Type":"ContainerStarted","Data":"b90f37f9d2c5ac569ac6cae8c044c759077357628f79a64a3c05e67dd3380208"}
Jan 29 12:21:58 crc kubenswrapper[4753]: I0129 12:21:58.591399 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-index-hp9dh" podStartSLOduration=2.641520276 podStartE2EDuration="3.591370025s" podCreationTimestamp="2026-01-29 12:21:55 +0000 UTC" firstStartedPulling="2026-01-29 12:21:56.599729723 +0000 UTC m=+930.851811178" lastFinishedPulling="2026-01-29 12:21:57.549579472 +0000 UTC m=+931.801660927" observedRunningTime="2026-01-29 12:21:58.584458769 +0000 UTC m=+932.836540224" watchObservedRunningTime="2026-01-29 12:21:58.591370025 +0000 UTC m=+932.843451480"
Jan 29 12:21:58 crc kubenswrapper[4753]: I0129 12:21:58.706167 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-index-c6nzn"]
Jan 29 12:21:58 crc kubenswrapper[4753]: I0129 12:21:58.707075 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-index-c6nzn"
Need to start a new one" pod="openstack-operators/infra-operator-index-c6nzn" Jan 29 12:21:58 crc kubenswrapper[4753]: I0129 12:21:58.715973 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-index-c6nzn"] Jan 29 12:21:58 crc kubenswrapper[4753]: I0129 12:21:58.847535 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bls8b\" (UniqueName: \"kubernetes.io/projected/c73421ec-f2e6-4378-b4b5-6fcdcbee082b-kube-api-access-bls8b\") pod \"infra-operator-index-c6nzn\" (UID: \"c73421ec-f2e6-4378-b4b5-6fcdcbee082b\") " pod="openstack-operators/infra-operator-index-c6nzn" Jan 29 12:21:58 crc kubenswrapper[4753]: I0129 12:21:58.948841 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bls8b\" (UniqueName: \"kubernetes.io/projected/c73421ec-f2e6-4378-b4b5-6fcdcbee082b-kube-api-access-bls8b\") pod \"infra-operator-index-c6nzn\" (UID: \"c73421ec-f2e6-4378-b4b5-6fcdcbee082b\") " pod="openstack-operators/infra-operator-index-c6nzn" Jan 29 12:21:58 crc kubenswrapper[4753]: I0129 12:21:58.973182 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bls8b\" (UniqueName: \"kubernetes.io/projected/c73421ec-f2e6-4378-b4b5-6fcdcbee082b-kube-api-access-bls8b\") pod \"infra-operator-index-c6nzn\" (UID: \"c73421ec-f2e6-4378-b4b5-6fcdcbee082b\") " pod="openstack-operators/infra-operator-index-c6nzn" Jan 29 12:21:59 crc kubenswrapper[4753]: I0129 12:21:59.027682 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-index-c6nzn" Jan 29 12:21:59 crc kubenswrapper[4753]: I0129 12:21:59.453189 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-index-c6nzn"] Jan 29 12:21:59 crc kubenswrapper[4753]: W0129 12:21:59.461179 4753 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc73421ec_f2e6_4378_b4b5_6fcdcbee082b.slice/crio-2915cfbdf994521b20df8f404382af41fd059ba8e61c75d612e531bd044f2b0a WatchSource:0}: Error finding container 2915cfbdf994521b20df8f404382af41fd059ba8e61c75d612e531bd044f2b0a: Status 404 returned error can't find the container with id 2915cfbdf994521b20df8f404382af41fd059ba8e61c75d612e531bd044f2b0a Jan 29 12:21:59 crc kubenswrapper[4753]: I0129 12:21:59.570780 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-index-c6nzn" event={"ID":"c73421ec-f2e6-4378-b4b5-6fcdcbee082b","Type":"ContainerStarted","Data":"2915cfbdf994521b20df8f404382af41fd059ba8e61c75d612e531bd044f2b0a"} Jan 29 12:21:59 crc kubenswrapper[4753]: I0129 12:21:59.570933 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/infra-operator-index-hp9dh" podUID="bca6c033-57b7-45c7-9240-9174503817e7" containerName="registry-server" containerID="cri-o://b90f37f9d2c5ac569ac6cae8c044c759077357628f79a64a3c05e67dd3380208" gracePeriod=2 Jan 29 12:21:59 crc kubenswrapper[4753]: I0129 12:21:59.919157 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-index-hp9dh" Jan 29 12:22:00 crc kubenswrapper[4753]: I0129 12:22:00.068614 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8rwql\" (UniqueName: \"kubernetes.io/projected/bca6c033-57b7-45c7-9240-9174503817e7-kube-api-access-8rwql\") pod \"bca6c033-57b7-45c7-9240-9174503817e7\" (UID: \"bca6c033-57b7-45c7-9240-9174503817e7\") " Jan 29 12:22:00 crc kubenswrapper[4753]: I0129 12:22:00.084978 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bca6c033-57b7-45c7-9240-9174503817e7-kube-api-access-8rwql" (OuterVolumeSpecName: "kube-api-access-8rwql") pod "bca6c033-57b7-45c7-9240-9174503817e7" (UID: "bca6c033-57b7-45c7-9240-9174503817e7"). InnerVolumeSpecName "kube-api-access-8rwql". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:22:00 crc kubenswrapper[4753]: I0129 12:22:00.170393 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8rwql\" (UniqueName: \"kubernetes.io/projected/bca6c033-57b7-45c7-9240-9174503817e7-kube-api-access-8rwql\") on node \"crc\" DevicePath \"\"" Jan 29 12:22:00 crc kubenswrapper[4753]: I0129 12:22:00.581046 4753 generic.go:334] "Generic (PLEG): container finished" podID="bca6c033-57b7-45c7-9240-9174503817e7" containerID="b90f37f9d2c5ac569ac6cae8c044c759077357628f79a64a3c05e67dd3380208" exitCode=0 Jan 29 12:22:00 crc kubenswrapper[4753]: I0129 12:22:00.581151 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-index-hp9dh" Jan 29 12:22:00 crc kubenswrapper[4753]: I0129 12:22:00.581155 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-index-hp9dh" event={"ID":"bca6c033-57b7-45c7-9240-9174503817e7","Type":"ContainerDied","Data":"b90f37f9d2c5ac569ac6cae8c044c759077357628f79a64a3c05e67dd3380208"} Jan 29 12:22:00 crc kubenswrapper[4753]: I0129 12:22:00.581275 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-index-hp9dh" event={"ID":"bca6c033-57b7-45c7-9240-9174503817e7","Type":"ContainerDied","Data":"42c6e18eeb60c8653df1689e965e311cfddb1522b68187c01fd94ed2642a4273"} Jan 29 12:22:00 crc kubenswrapper[4753]: I0129 12:22:00.581306 4753 scope.go:117] "RemoveContainer" containerID="b90f37f9d2c5ac569ac6cae8c044c759077357628f79a64a3c05e67dd3380208" Jan 29 12:22:00 crc kubenswrapper[4753]: I0129 12:22:00.584528 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-index-c6nzn" event={"ID":"c73421ec-f2e6-4378-b4b5-6fcdcbee082b","Type":"ContainerStarted","Data":"1b59d0f2993ed03888c34a2e23832be3e53542a2eacb5e5294a37745a8f10697"} Jan 29 12:22:00 crc kubenswrapper[4753]: I0129 12:22:00.601737 4753 scope.go:117] "RemoveContainer" containerID="b90f37f9d2c5ac569ac6cae8c044c759077357628f79a64a3c05e67dd3380208" Jan 29 12:22:00 crc kubenswrapper[4753]: E0129 12:22:00.602548 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b90f37f9d2c5ac569ac6cae8c044c759077357628f79a64a3c05e67dd3380208\": container with ID starting with b90f37f9d2c5ac569ac6cae8c044c759077357628f79a64a3c05e67dd3380208 not found: ID does not exist" containerID="b90f37f9d2c5ac569ac6cae8c044c759077357628f79a64a3c05e67dd3380208" Jan 29 12:22:00 crc kubenswrapper[4753]: I0129 12:22:00.602601 4753 pod_container_deletor.go:53] "DeleteContainer returned 
error" containerID={"Type":"cri-o","ID":"b90f37f9d2c5ac569ac6cae8c044c759077357628f79a64a3c05e67dd3380208"} err="failed to get container status \"b90f37f9d2c5ac569ac6cae8c044c759077357628f79a64a3c05e67dd3380208\": rpc error: code = NotFound desc = could not find container \"b90f37f9d2c5ac569ac6cae8c044c759077357628f79a64a3c05e67dd3380208\": container with ID starting with b90f37f9d2c5ac569ac6cae8c044c759077357628f79a64a3c05e67dd3380208 not found: ID does not exist" Jan 29 12:22:00 crc kubenswrapper[4753]: I0129 12:22:00.603899 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-index-c6nzn" podStartSLOduration=2.196419705 podStartE2EDuration="2.603871877s" podCreationTimestamp="2026-01-29 12:21:58 +0000 UTC" firstStartedPulling="2026-01-29 12:21:59.465573751 +0000 UTC m=+933.717655206" lastFinishedPulling="2026-01-29 12:21:59.873025923 +0000 UTC m=+934.125107378" observedRunningTime="2026-01-29 12:22:00.600616745 +0000 UTC m=+934.852698210" watchObservedRunningTime="2026-01-29 12:22:00.603871877 +0000 UTC m=+934.855953332" Jan 29 12:22:00 crc kubenswrapper[4753]: I0129 12:22:00.616893 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/infra-operator-index-hp9dh"] Jan 29 12:22:00 crc kubenswrapper[4753]: I0129 12:22:00.621474 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/infra-operator-index-hp9dh"] Jan 29 12:22:01 crc kubenswrapper[4753]: I0129 12:22:01.911157 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bca6c033-57b7-45c7-9240-9174503817e7" path="/var/lib/kubelet/pods/bca6c033-57b7-45c7-9240-9174503817e7/volumes" Jan 29 12:22:09 crc kubenswrapper[4753]: I0129 12:22:09.039008 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-index-c6nzn" Jan 29 12:22:09 crc kubenswrapper[4753]: I0129 12:22:09.039665 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/infra-operator-index-c6nzn" Jan 29 12:22:09 crc kubenswrapper[4753]: I0129 12:22:09.111694 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/infra-operator-index-c6nzn" Jan 29 12:22:09 crc kubenswrapper[4753]: I0129 12:22:09.690629 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-index-c6nzn" Jan 29 12:22:11 crc kubenswrapper[4753]: I0129 12:22:11.145919 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576q8z85"] Jan 29 12:22:11 crc kubenswrapper[4753]: E0129 12:22:11.146555 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bca6c033-57b7-45c7-9240-9174503817e7" containerName="registry-server" Jan 29 12:22:11 crc kubenswrapper[4753]: I0129 12:22:11.146571 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="bca6c033-57b7-45c7-9240-9174503817e7" containerName="registry-server" Jan 29 12:22:11 crc kubenswrapper[4753]: I0129 12:22:11.146719 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="bca6c033-57b7-45c7-9240-9174503817e7" containerName="registry-server" Jan 29 12:22:11 crc kubenswrapper[4753]: I0129 12:22:11.147660 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576q8z85" Jan 29 12:22:11 crc kubenswrapper[4753]: I0129 12:22:11.149728 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-j2jmz" Jan 29 12:22:11 crc kubenswrapper[4753]: I0129 12:22:11.163660 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576q8z85"] Jan 29 12:22:11 crc kubenswrapper[4753]: I0129 12:22:11.210204 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pkr9f\" (UniqueName: \"kubernetes.io/projected/00777d2a-950c-42f8-a8e2-9df4cbab7add-kube-api-access-pkr9f\") pod \"d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576q8z85\" (UID: \"00777d2a-950c-42f8-a8e2-9df4cbab7add\") " pod="openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576q8z85" Jan 29 12:22:11 crc kubenswrapper[4753]: I0129 12:22:11.210524 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/00777d2a-950c-42f8-a8e2-9df4cbab7add-bundle\") pod \"d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576q8z85\" (UID: \"00777d2a-950c-42f8-a8e2-9df4cbab7add\") " pod="openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576q8z85" Jan 29 12:22:11 crc kubenswrapper[4753]: I0129 12:22:11.210711 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/00777d2a-950c-42f8-a8e2-9df4cbab7add-util\") pod \"d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576q8z85\" (UID: \"00777d2a-950c-42f8-a8e2-9df4cbab7add\") " pod="openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576q8z85" Jan 29 12:22:11 crc kubenswrapper[4753]: I0129 12:22:11.311466 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pkr9f\" (UniqueName: \"kubernetes.io/projected/00777d2a-950c-42f8-a8e2-9df4cbab7add-kube-api-access-pkr9f\") pod \"d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576q8z85\" (UID: \"00777d2a-950c-42f8-a8e2-9df4cbab7add\") " pod="openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576q8z85" Jan 29 12:22:11 crc kubenswrapper[4753]: I0129 12:22:11.311542 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/00777d2a-950c-42f8-a8e2-9df4cbab7add-bundle\") pod \"d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576q8z85\" (UID: \"00777d2a-950c-42f8-a8e2-9df4cbab7add\") " pod="openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576q8z85" Jan 29 12:22:11 crc kubenswrapper[4753]: I0129 12:22:11.311586 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/00777d2a-950c-42f8-a8e2-9df4cbab7add-util\") pod \"d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576q8z85\" (UID: \"00777d2a-950c-42f8-a8e2-9df4cbab7add\") " pod="openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576q8z85" Jan 29 12:22:11 crc kubenswrapper[4753]: I0129 12:22:11.312217 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/00777d2a-950c-42f8-a8e2-9df4cbab7add-util\") pod \"d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576q8z85\" (UID: \"00777d2a-950c-42f8-a8e2-9df4cbab7add\") " pod="openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576q8z85" Jan 29 12:22:11 crc kubenswrapper[4753]: I0129 12:22:11.312691 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/00777d2a-950c-42f8-a8e2-9df4cbab7add-bundle\") pod \"d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576q8z85\" (UID: \"00777d2a-950c-42f8-a8e2-9df4cbab7add\") " pod="openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576q8z85" Jan 29 12:22:11 crc kubenswrapper[4753]: I0129 12:22:11.333888 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pkr9f\" (UniqueName: \"kubernetes.io/projected/00777d2a-950c-42f8-a8e2-9df4cbab7add-kube-api-access-pkr9f\") pod \"d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576q8z85\" (UID: \"00777d2a-950c-42f8-a8e2-9df4cbab7add\") " pod="openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576q8z85" Jan 29 12:22:11 crc kubenswrapper[4753]: I0129 12:22:11.482679 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576q8z85" Jan 29 12:22:12 crc kubenswrapper[4753]: I0129 12:22:12.141572 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576q8z85"] Jan 29 12:22:12 crc kubenswrapper[4753]: I0129 12:22:12.870984 4753 generic.go:334] "Generic (PLEG): container finished" podID="00777d2a-950c-42f8-a8e2-9df4cbab7add" containerID="55a7e62f5e99631b1c13bcb20e4cc75a01c6c0b749948a47736ee4222a197102" exitCode=0 Jan 29 12:22:12 crc kubenswrapper[4753]: I0129 12:22:12.871042 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576q8z85" event={"ID":"00777d2a-950c-42f8-a8e2-9df4cbab7add","Type":"ContainerDied","Data":"55a7e62f5e99631b1c13bcb20e4cc75a01c6c0b749948a47736ee4222a197102"} Jan 29 12:22:12 crc kubenswrapper[4753]: I0129 12:22:12.871283 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576q8z85" event={"ID":"00777d2a-950c-42f8-a8e2-9df4cbab7add","Type":"ContainerStarted","Data":"606c9cc89d90eba6d8eb03d5bc60f4f55d7baa2c962c14187f8cea229dc46f62"} Jan 29 12:22:14 crc kubenswrapper[4753]: I0129 12:22:14.886978 4753 generic.go:334] "Generic (PLEG): container finished" podID="00777d2a-950c-42f8-a8e2-9df4cbab7add" containerID="2e87f456e5cf6f1ead988353be01501ea3e5e591fcda20684f5468291e2da843" exitCode=0 Jan 29 12:22:14 crc kubenswrapper[4753]: I0129 12:22:14.887063 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576q8z85" event={"ID":"00777d2a-950c-42f8-a8e2-9df4cbab7add","Type":"ContainerDied","Data":"2e87f456e5cf6f1ead988353be01501ea3e5e591fcda20684f5468291e2da843"} Jan 29 12:22:15 crc kubenswrapper[4753]: I0129 12:22:15.895507 4753 generic.go:334] "Generic (PLEG): container finished" podID="00777d2a-950c-42f8-a8e2-9df4cbab7add" containerID="3bc8557324f62a6150da3afd5be16af1bb3af5b6eb84f912b02c6ee75e81b9e6" exitCode=0 Jan 29 12:22:15 crc kubenswrapper[4753]: I0129 12:22:15.896144 4753 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576q8z85" event={"ID":"00777d2a-950c-42f8-a8e2-9df4cbab7add","Type":"ContainerDied","Data":"3bc8557324f62a6150da3afd5be16af1bb3af5b6eb84f912b02c6ee75e81b9e6"} Jan 29 12:22:17 crc kubenswrapper[4753]: I0129 12:22:17.175283 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576q8z85" Jan 29 12:22:17 crc kubenswrapper[4753]: I0129 12:22:17.209584 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pkr9f\" (UniqueName: \"kubernetes.io/projected/00777d2a-950c-42f8-a8e2-9df4cbab7add-kube-api-access-pkr9f\") pod \"00777d2a-950c-42f8-a8e2-9df4cbab7add\" (UID: \"00777d2a-950c-42f8-a8e2-9df4cbab7add\") " Jan 29 12:22:17 crc kubenswrapper[4753]: I0129 12:22:17.209687 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/00777d2a-950c-42f8-a8e2-9df4cbab7add-util\") pod \"00777d2a-950c-42f8-a8e2-9df4cbab7add\" (UID: \"00777d2a-950c-42f8-a8e2-9df4cbab7add\") " Jan 29 12:22:17 crc kubenswrapper[4753]: I0129 12:22:17.209723 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/00777d2a-950c-42f8-a8e2-9df4cbab7add-bundle\") pod \"00777d2a-950c-42f8-a8e2-9df4cbab7add\" (UID: \"00777d2a-950c-42f8-a8e2-9df4cbab7add\") " Jan 29 12:22:17 crc kubenswrapper[4753]: I0129 12:22:17.213023 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/00777d2a-950c-42f8-a8e2-9df4cbab7add-bundle" (OuterVolumeSpecName: "bundle") pod "00777d2a-950c-42f8-a8e2-9df4cbab7add" (UID: "00777d2a-950c-42f8-a8e2-9df4cbab7add"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:22:17 crc kubenswrapper[4753]: I0129 12:22:17.216722 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/00777d2a-950c-42f8-a8e2-9df4cbab7add-kube-api-access-pkr9f" (OuterVolumeSpecName: "kube-api-access-pkr9f") pod "00777d2a-950c-42f8-a8e2-9df4cbab7add" (UID: "00777d2a-950c-42f8-a8e2-9df4cbab7add"). InnerVolumeSpecName "kube-api-access-pkr9f". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:22:17 crc kubenswrapper[4753]: I0129 12:22:17.231654 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/00777d2a-950c-42f8-a8e2-9df4cbab7add-util" (OuterVolumeSpecName: "util") pod "00777d2a-950c-42f8-a8e2-9df4cbab7add" (UID: "00777d2a-950c-42f8-a8e2-9df4cbab7add"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:22:17 crc kubenswrapper[4753]: I0129 12:22:17.310849 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pkr9f\" (UniqueName: \"kubernetes.io/projected/00777d2a-950c-42f8-a8e2-9df4cbab7add-kube-api-access-pkr9f\") on node \"crc\" DevicePath \"\"" Jan 29 12:22:17 crc kubenswrapper[4753]: I0129 12:22:17.310893 4753 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/00777d2a-950c-42f8-a8e2-9df4cbab7add-util\") on node \"crc\" DevicePath \"\"" Jan 29 12:22:17 crc kubenswrapper[4753]: I0129 12:22:17.310933 4753 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/00777d2a-950c-42f8-a8e2-9df4cbab7add-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:22:17 crc kubenswrapper[4753]: I0129 12:22:17.909645 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576q8z85" event={"ID":"00777d2a-950c-42f8-a8e2-9df4cbab7add","Type":"ContainerDied","Data":"606c9cc89d90eba6d8eb03d5bc60f4f55d7baa2c962c14187f8cea229dc46f62"} Jan 29 12:22:17 crc kubenswrapper[4753]: I0129 12:22:17.909731 4753 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="606c9cc89d90eba6d8eb03d5bc60f4f55d7baa2c962c14187f8cea229dc46f62" Jan 29 12:22:17 crc kubenswrapper[4753]: I0129 12:22:17.909972 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576q8z85" Jan 29 12:22:27 crc kubenswrapper[4753]: I0129 12:22:27.324892 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-79c7d496c4-zf52n"] Jan 29 12:22:27 crc kubenswrapper[4753]: E0129 12:22:27.325703 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00777d2a-950c-42f8-a8e2-9df4cbab7add" containerName="pull" Jan 29 12:22:27 crc kubenswrapper[4753]: I0129 12:22:27.325721 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="00777d2a-950c-42f8-a8e2-9df4cbab7add" containerName="pull" Jan 29 12:22:27 crc kubenswrapper[4753]: E0129 12:22:27.325742 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00777d2a-950c-42f8-a8e2-9df4cbab7add" containerName="extract" Jan 29 12:22:27 crc kubenswrapper[4753]: I0129 12:22:27.325748 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="00777d2a-950c-42f8-a8e2-9df4cbab7add" containerName="extract" Jan 29 12:22:27 crc kubenswrapper[4753]: E0129 12:22:27.325759 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00777d2a-950c-42f8-a8e2-9df4cbab7add" containerName="util" Jan 29 12:22:27 crc kubenswrapper[4753]: I0129 12:22:27.325765 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="00777d2a-950c-42f8-a8e2-9df4cbab7add" containerName="util" Jan 29 12:22:27 crc kubenswrapper[4753]: I0129 12:22:27.325910 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="00777d2a-950c-42f8-a8e2-9df4cbab7add" containerName="extract" Jan 29 12:22:27 crc kubenswrapper[4753]: I0129 12:22:27.326354 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-79c7d496c4-zf52n" Jan 29 12:22:27 crc kubenswrapper[4753]: W0129 12:22:27.329564 4753 reflector.go:561] object-"openstack-operators"/"infra-operator-controller-manager-service-cert": failed to list *v1.Secret: secrets "infra-operator-controller-manager-service-cert" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openstack-operators": no relationship found between node 'crc' and this object Jan 29 12:22:27 crc kubenswrapper[4753]: W0129 12:22:27.329610 4753 reflector.go:561] object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-hdclg": failed to list *v1.Secret: secrets "infra-operator-controller-manager-dockercfg-hdclg" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openstack-operators": no relationship found between node 'crc' and this object Jan 29 12:22:27 crc kubenswrapper[4753]: E0129 12:22:27.329724 4753 reflector.go:158] "Unhandled Error" err="object-\"openstack-operators\"/\"infra-operator-controller-manager-dockercfg-hdclg\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"infra-operator-controller-manager-dockercfg-hdclg\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openstack-operators\": no relationship found between node 'crc' and this object" logger="UnhandledError" Jan 29 12:22:27 crc kubenswrapper[4753]: E0129 12:22:27.329724 4753 reflector.go:158] "Unhandled Error" err="object-\"openstack-operators\"/\"infra-operator-controller-manager-service-cert\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"infra-operator-controller-manager-service-cert\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openstack-operators\": no relationship found between node 'crc' and this object" logger="UnhandledError" Jan 29 12:22:27 crc kubenswrapper[4753]: I0129 12:22:27.332662 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf-webhook-cert\") pod \"infra-operator-controller-manager-79c7d496c4-zf52n\" (UID: \"6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf\") " pod="openstack-operators/infra-operator-controller-manager-79c7d496c4-zf52n" Jan 29 12:22:27 crc kubenswrapper[4753]: I0129 12:22:27.332710 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf-apiservice-cert\") pod \"infra-operator-controller-manager-79c7d496c4-zf52n\" (UID: \"6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf\") " pod="openstack-operators/infra-operator-controller-manager-79c7d496c4-zf52n" Jan 29 12:22:27 crc kubenswrapper[4753]: I0129 12:22:27.332742 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mmkwk\" (UniqueName: \"kubernetes.io/projected/6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf-kube-api-access-mmkwk\") pod \"infra-operator-controller-manager-79c7d496c4-zf52n\" (UID: \"6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf\") " pod="openstack-operators/infra-operator-controller-manager-79c7d496c4-zf52n" Jan 29 12:22:27 crc kubenswrapper[4753]: I0129 12:22:27.360220 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack-operators/infra-operator-controller-manager-79c7d496c4-zf52n"] Jan 29 12:22:27 crc kubenswrapper[4753]: I0129 12:22:27.433776 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf-webhook-cert\") pod \"infra-operator-controller-manager-79c7d496c4-zf52n\" (UID: \"6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf\") " pod="openstack-operators/infra-operator-controller-manager-79c7d496c4-zf52n" Jan 29 12:22:27 crc kubenswrapper[4753]: I0129 12:22:27.434088 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf-apiservice-cert\") pod \"infra-operator-controller-manager-79c7d496c4-zf52n\" (UID: \"6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf\") " pod="openstack-operators/infra-operator-controller-manager-79c7d496c4-zf52n" Jan 29 12:22:27 crc kubenswrapper[4753]: I0129 12:22:27.434192 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mmkwk\" (UniqueName: \"kubernetes.io/projected/6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf-kube-api-access-mmkwk\") pod \"infra-operator-controller-manager-79c7d496c4-zf52n\" (UID: \"6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf\") " pod="openstack-operators/infra-operator-controller-manager-79c7d496c4-zf52n" Jan 29 12:22:27 crc kubenswrapper[4753]: I0129 12:22:27.460449 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mmkwk\" (UniqueName: \"kubernetes.io/projected/6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf-kube-api-access-mmkwk\") pod \"infra-operator-controller-manager-79c7d496c4-zf52n\" (UID: \"6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf\") " pod="openstack-operators/infra-operator-controller-manager-79c7d496c4-zf52n" Jan 29 12:22:27 crc kubenswrapper[4753]: I0129 12:22:27.579705 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/openstack-galera-0"] Jan 29 12:22:27 crc kubenswrapper[4753]: I0129 12:22:27.580966 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/openstack-galera-0" Jan 29 12:22:27 crc kubenswrapper[4753]: I0129 12:22:27.583692 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"swift-kuttl-tests"/"openshift-service-ca.crt" Jan 29 12:22:27 crc kubenswrapper[4753]: I0129 12:22:27.583767 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"swift-kuttl-tests"/"kube-root-ca.crt" Jan 29 12:22:27 crc kubenswrapper[4753]: I0129 12:22:27.583942 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"swift-kuttl-tests"/"openstack-scripts" Jan 29 12:22:27 crc kubenswrapper[4753]: I0129 12:22:27.584076 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"swift-kuttl-tests"/"openstack-config-data" Jan 29 12:22:27 crc kubenswrapper[4753]: I0129 12:22:27.586942 4753 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"galera-openstack-dockercfg-b629c" Jan 29 12:22:27 crc kubenswrapper[4753]: I0129 12:22:27.585217 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/openstack-galera-1"] Jan 29 12:22:27 crc kubenswrapper[4753]: I0129 12:22:27.767483 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/openstack-galera-0"] Jan 29 12:22:27 crc kubenswrapper[4753]: I0129 12:22:27.767626 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/openstack-galera-1" Jan 29 12:22:27 crc kubenswrapper[4753]: I0129 12:22:27.791173 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/openstack-galera-2"] Jan 29 12:22:27 crc kubenswrapper[4753]: I0129 12:22:27.798988 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/openstack-galera-1"] Jan 29 12:22:27 crc kubenswrapper[4753]: I0129 12:22:27.799183 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/openstack-galera-2" Jan 29 12:22:27 crc kubenswrapper[4753]: I0129 12:22:27.809948 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/openstack-galera-2"] Jan 29 12:22:27 crc kubenswrapper[4753]: I0129 12:22:27.869462 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"openstack-galera-0\" (UID: \"6762f214-cfd1-4314-9fa0-1d2b40e87b4e\") " pod="swift-kuttl-tests/openstack-galera-0" Jan 29 12:22:27 crc kubenswrapper[4753]: I0129 12:22:27.869518 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6762f214-cfd1-4314-9fa0-1d2b40e87b4e-operator-scripts\") pod \"openstack-galera-0\" (UID: \"6762f214-cfd1-4314-9fa0-1d2b40e87b4e\") " pod="swift-kuttl-tests/openstack-galera-0" Jan 29 12:22:27 crc kubenswrapper[4753]: I0129 12:22:27.869583 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/6762f214-cfd1-4314-9fa0-1d2b40e87b4e-config-data-generated\") pod \"openstack-galera-0\" (UID: \"6762f214-cfd1-4314-9fa0-1d2b40e87b4e\") " pod="swift-kuttl-tests/openstack-galera-0" Jan 29 12:22:27 crc kubenswrapper[4753]: I0129 12:22:27.869614 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/6762f214-cfd1-4314-9fa0-1d2b40e87b4e-kolla-config\") pod \"openstack-galera-0\" (UID: \"6762f214-cfd1-4314-9fa0-1d2b40e87b4e\") " pod="swift-kuttl-tests/openstack-galera-0" Jan 29 12:22:27 crc kubenswrapper[4753]: I0129 12:22:27.869637 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qckl8\" (UniqueName: \"kubernetes.io/projected/6762f214-cfd1-4314-9fa0-1d2b40e87b4e-kube-api-access-qckl8\") pod \"openstack-galera-0\" (UID: \"6762f214-cfd1-4314-9fa0-1d2b40e87b4e\") " pod="swift-kuttl-tests/openstack-galera-0" Jan 29 12:22:27 crc kubenswrapper[4753]: I0129 12:22:27.869700 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/6762f214-cfd1-4314-9fa0-1d2b40e87b4e-config-data-default\") pod \"openstack-galera-0\" (UID: \"6762f214-cfd1-4314-9fa0-1d2b40e87b4e\") " pod="swift-kuttl-tests/openstack-galera-0" Jan 29 12:22:27 crc kubenswrapper[4753]: I0129 12:22:27.973127 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/89e38fc2-9f07-45f0-8cdc-b77931872d7b-config-data-default\") pod \"openstack-galera-1\" (UID: \"89e38fc2-9f07-45f0-8cdc-b77931872d7b\") " pod="swift-kuttl-tests/openstack-galera-1" Jan 29 12:22:27 crc 
kubenswrapper[4753]: I0129 12:22:27.973211 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cc8c9706-0165-4cc6-ad22-bc70f03d5bf9-operator-scripts\") pod \"openstack-galera-2\" (UID: \"cc8c9706-0165-4cc6-ad22-bc70f03d5bf9\") " pod="swift-kuttl-tests/openstack-galera-2" Jan 29 12:22:27 crc kubenswrapper[4753]: I0129 12:22:27.973266 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/cc8c9706-0165-4cc6-ad22-bc70f03d5bf9-kolla-config\") pod \"openstack-galera-2\" (UID: \"cc8c9706-0165-4cc6-ad22-bc70f03d5bf9\") " pod="swift-kuttl-tests/openstack-galera-2" Jan 29 12:22:27 crc kubenswrapper[4753]: I0129 12:22:27.973301 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-galera-1\" (UID: \"89e38fc2-9f07-45f0-8cdc-b77931872d7b\") " pod="swift-kuttl-tests/openstack-galera-1" Jan 29 12:22:27 crc kubenswrapper[4753]: I0129 12:22:27.973346 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/89e38fc2-9f07-45f0-8cdc-b77931872d7b-kolla-config\") pod \"openstack-galera-1\" (UID: \"89e38fc2-9f07-45f0-8cdc-b77931872d7b\") " pod="swift-kuttl-tests/openstack-galera-1" Jan 29 12:22:27 crc kubenswrapper[4753]: I0129 12:22:27.973381 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/6762f214-cfd1-4314-9fa0-1d2b40e87b4e-config-data-default\") pod \"openstack-galera-0\" (UID: \"6762f214-cfd1-4314-9fa0-1d2b40e87b4e\") " pod="swift-kuttl-tests/openstack-galera-0" Jan 29 12:22:27 crc kubenswrapper[4753]: I0129 12:22:27.973443 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"openstack-galera-0\" (UID: \"6762f214-cfd1-4314-9fa0-1d2b40e87b4e\") " pod="swift-kuttl-tests/openstack-galera-0" Jan 29 12:22:27 crc kubenswrapper[4753]: I0129 12:22:27.973470 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/cc8c9706-0165-4cc6-ad22-bc70f03d5bf9-config-data-default\") pod \"openstack-galera-2\" (UID: \"cc8c9706-0165-4cc6-ad22-bc70f03d5bf9\") " pod="swift-kuttl-tests/openstack-galera-2" Jan 29 12:22:27 crc kubenswrapper[4753]: I0129 12:22:27.973494 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q4dzf\" (UniqueName: \"kubernetes.io/projected/cc8c9706-0165-4cc6-ad22-bc70f03d5bf9-kube-api-access-q4dzf\") pod \"openstack-galera-2\" (UID: \"cc8c9706-0165-4cc6-ad22-bc70f03d5bf9\") " pod="swift-kuttl-tests/openstack-galera-2" Jan 29 12:22:27 crc kubenswrapper[4753]: I0129 12:22:27.973521 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6762f214-cfd1-4314-9fa0-1d2b40e87b4e-operator-scripts\") pod \"openstack-galera-0\" (UID: \"6762f214-cfd1-4314-9fa0-1d2b40e87b4e\") " pod="swift-kuttl-tests/openstack-galera-0" Jan 29 12:22:27 crc kubenswrapper[4753]: I0129 12:22:27.973557 4753 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/89e38fc2-9f07-45f0-8cdc-b77931872d7b-config-data-generated\") pod \"openstack-galera-1\" (UID: \"89e38fc2-9f07-45f0-8cdc-b77931872d7b\") " pod="swift-kuttl-tests/openstack-galera-1" Jan 29 12:22:27 crc kubenswrapper[4753]: I0129 12:22:27.973598 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/6762f214-cfd1-4314-9fa0-1d2b40e87b4e-config-data-generated\") pod \"openstack-galera-0\" (UID: \"6762f214-cfd1-4314-9fa0-1d2b40e87b4e\") " pod="swift-kuttl-tests/openstack-galera-0" Jan 29 12:22:27 crc kubenswrapper[4753]: I0129 12:22:27.973616 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/cc8c9706-0165-4cc6-ad22-bc70f03d5bf9-config-data-generated\") pod \"openstack-galera-2\" (UID: \"cc8c9706-0165-4cc6-ad22-bc70f03d5bf9\") " pod="swift-kuttl-tests/openstack-galera-2" Jan 29 12:22:27 crc kubenswrapper[4753]: I0129 12:22:27.973634 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6zlck\" (UniqueName: \"kubernetes.io/projected/89e38fc2-9f07-45f0-8cdc-b77931872d7b-kube-api-access-6zlck\") pod \"openstack-galera-1\" (UID: \"89e38fc2-9f07-45f0-8cdc-b77931872d7b\") " pod="swift-kuttl-tests/openstack-galera-1" Jan 29 12:22:27 crc kubenswrapper[4753]: I0129 12:22:27.973656 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/6762f214-cfd1-4314-9fa0-1d2b40e87b4e-kolla-config\") pod \"openstack-galera-0\" (UID: \"6762f214-cfd1-4314-9fa0-1d2b40e87b4e\") " pod="swift-kuttl-tests/openstack-galera-0" Jan 29 12:22:27 crc kubenswrapper[4753]: I0129 12:22:27.973671 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qckl8\" (UniqueName: \"kubernetes.io/projected/6762f214-cfd1-4314-9fa0-1d2b40e87b4e-kube-api-access-qckl8\") pod \"openstack-galera-0\" (UID: \"6762f214-cfd1-4314-9fa0-1d2b40e87b4e\") " pod="swift-kuttl-tests/openstack-galera-0" Jan 29 12:22:27 crc kubenswrapper[4753]: I0129 12:22:27.973687 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/89e38fc2-9f07-45f0-8cdc-b77931872d7b-operator-scripts\") pod \"openstack-galera-1\" (UID: \"89e38fc2-9f07-45f0-8cdc-b77931872d7b\") " pod="swift-kuttl-tests/openstack-galera-1" Jan 29 12:22:27 crc kubenswrapper[4753]: I0129 12:22:27.973707 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-galera-2\" (UID: \"cc8c9706-0165-4cc6-ad22-bc70f03d5bf9\") " pod="swift-kuttl-tests/openstack-galera-2" Jan 29 12:22:27 crc kubenswrapper[4753]: I0129 12:22:27.973977 4753 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"openstack-galera-0\" (UID: \"6762f214-cfd1-4314-9fa0-1d2b40e87b4e\") device mount path \"/mnt/openstack/pv10\"" pod="swift-kuttl-tests/openstack-galera-0" Jan 29 12:22:27 crc kubenswrapper[4753]: I0129 12:22:27.974302 4753 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/6762f214-cfd1-4314-9fa0-1d2b40e87b4e-config-data-generated\") pod \"openstack-galera-0\" (UID: \"6762f214-cfd1-4314-9fa0-1d2b40e87b4e\") " pod="swift-kuttl-tests/openstack-galera-0" Jan 29 12:22:27 crc kubenswrapper[4753]: I0129 12:22:27.974532 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/6762f214-cfd1-4314-9fa0-1d2b40e87b4e-config-data-default\") pod \"openstack-galera-0\" (UID: \"6762f214-cfd1-4314-9fa0-1d2b40e87b4e\") " pod="swift-kuttl-tests/openstack-galera-0" Jan 29 12:22:27 crc kubenswrapper[4753]: I0129 12:22:27.974924 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/6762f214-cfd1-4314-9fa0-1d2b40e87b4e-kolla-config\") pod \"openstack-galera-0\" (UID: \"6762f214-cfd1-4314-9fa0-1d2b40e87b4e\") " pod="swift-kuttl-tests/openstack-galera-0" Jan 29 12:22:27 crc kubenswrapper[4753]: I0129 12:22:27.976108 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6762f214-cfd1-4314-9fa0-1d2b40e87b4e-operator-scripts\") pod \"openstack-galera-0\" (UID: \"6762f214-cfd1-4314-9fa0-1d2b40e87b4e\") " pod="swift-kuttl-tests/openstack-galera-0" Jan 29 12:22:27 crc kubenswrapper[4753]: I0129 12:22:27.994291 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qckl8\" (UniqueName: \"kubernetes.io/projected/6762f214-cfd1-4314-9fa0-1d2b40e87b4e-kube-api-access-qckl8\") pod \"openstack-galera-0\" (UID: \"6762f214-cfd1-4314-9fa0-1d2b40e87b4e\") " pod="swift-kuttl-tests/openstack-galera-0" Jan 29 12:22:27 crc kubenswrapper[4753]: I0129 12:22:27.997681 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"openstack-galera-0\" (UID: \"6762f214-cfd1-4314-9fa0-1d2b40e87b4e\") " pod="swift-kuttl-tests/openstack-galera-0" Jan 29 12:22:28 crc kubenswrapper[4753]: I0129 12:22:28.075247 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/89e38fc2-9f07-45f0-8cdc-b77931872d7b-kolla-config\") pod \"openstack-galera-1\" (UID: \"89e38fc2-9f07-45f0-8cdc-b77931872d7b\") " pod="swift-kuttl-tests/openstack-galera-1" Jan 29 12:22:28 crc kubenswrapper[4753]: I0129 12:22:28.075366 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/cc8c9706-0165-4cc6-ad22-bc70f03d5bf9-config-data-default\") pod \"openstack-galera-2\" (UID: \"cc8c9706-0165-4cc6-ad22-bc70f03d5bf9\") " pod="swift-kuttl-tests/openstack-galera-2" Jan 29 12:22:28 crc kubenswrapper[4753]: I0129 12:22:28.075396 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q4dzf\" (UniqueName: \"kubernetes.io/projected/cc8c9706-0165-4cc6-ad22-bc70f03d5bf9-kube-api-access-q4dzf\") pod \"openstack-galera-2\" (UID: \"cc8c9706-0165-4cc6-ad22-bc70f03d5bf9\") " pod="swift-kuttl-tests/openstack-galera-2" Jan 29 12:22:28 crc kubenswrapper[4753]: I0129 12:22:28.075433 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: 
\"kubernetes.io/empty-dir/89e38fc2-9f07-45f0-8cdc-b77931872d7b-config-data-generated\") pod \"openstack-galera-1\" (UID: \"89e38fc2-9f07-45f0-8cdc-b77931872d7b\") " pod="swift-kuttl-tests/openstack-galera-1" Jan 29 12:22:28 crc kubenswrapper[4753]: I0129 12:22:28.075496 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/cc8c9706-0165-4cc6-ad22-bc70f03d5bf9-config-data-generated\") pod \"openstack-galera-2\" (UID: \"cc8c9706-0165-4cc6-ad22-bc70f03d5bf9\") " pod="swift-kuttl-tests/openstack-galera-2" Jan 29 12:22:28 crc kubenswrapper[4753]: I0129 12:22:28.075518 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6zlck\" (UniqueName: \"kubernetes.io/projected/89e38fc2-9f07-45f0-8cdc-b77931872d7b-kube-api-access-6zlck\") pod \"openstack-galera-1\" (UID: \"89e38fc2-9f07-45f0-8cdc-b77931872d7b\") " pod="swift-kuttl-tests/openstack-galera-1" Jan 29 12:22:28 crc kubenswrapper[4753]: I0129 12:22:28.075552 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/89e38fc2-9f07-45f0-8cdc-b77931872d7b-operator-scripts\") pod \"openstack-galera-1\" (UID: \"89e38fc2-9f07-45f0-8cdc-b77931872d7b\") " pod="swift-kuttl-tests/openstack-galera-1" Jan 29 12:22:28 crc kubenswrapper[4753]: I0129 12:22:28.075577 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-galera-2\" (UID: \"cc8c9706-0165-4cc6-ad22-bc70f03d5bf9\") " pod="swift-kuttl-tests/openstack-galera-2" Jan 29 12:22:28 crc kubenswrapper[4753]: I0129 12:22:28.075614 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/89e38fc2-9f07-45f0-8cdc-b77931872d7b-config-data-default\") pod \"openstack-galera-1\" (UID: \"89e38fc2-9f07-45f0-8cdc-b77931872d7b\") " pod="swift-kuttl-tests/openstack-galera-1" Jan 29 12:22:28 crc kubenswrapper[4753]: I0129 12:22:28.075650 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cc8c9706-0165-4cc6-ad22-bc70f03d5bf9-operator-scripts\") pod \"openstack-galera-2\" (UID: \"cc8c9706-0165-4cc6-ad22-bc70f03d5bf9\") " pod="swift-kuttl-tests/openstack-galera-2" Jan 29 12:22:28 crc kubenswrapper[4753]: I0129 12:22:28.075666 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/cc8c9706-0165-4cc6-ad22-bc70f03d5bf9-kolla-config\") pod \"openstack-galera-2\" (UID: \"cc8c9706-0165-4cc6-ad22-bc70f03d5bf9\") " pod="swift-kuttl-tests/openstack-galera-2" Jan 29 12:22:28 crc kubenswrapper[4753]: I0129 12:22:28.075702 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-galera-1\" (UID: \"89e38fc2-9f07-45f0-8cdc-b77931872d7b\") " pod="swift-kuttl-tests/openstack-galera-1" Jan 29 12:22:28 crc kubenswrapper[4753]: I0129 12:22:28.076138 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/89e38fc2-9f07-45f0-8cdc-b77931872d7b-kolla-config\") pod \"openstack-galera-1\" (UID: \"89e38fc2-9f07-45f0-8cdc-b77931872d7b\") " 
pod="swift-kuttl-tests/openstack-galera-1" Jan 29 12:22:28 crc kubenswrapper[4753]: I0129 12:22:28.076411 4753 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-galera-2\" (UID: \"cc8c9706-0165-4cc6-ad22-bc70f03d5bf9\") device mount path \"/mnt/openstack/pv03\"" pod="swift-kuttl-tests/openstack-galera-2" Jan 29 12:22:28 crc kubenswrapper[4753]: I0129 12:22:28.076536 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/cc8c9706-0165-4cc6-ad22-bc70f03d5bf9-config-data-default\") pod \"openstack-galera-2\" (UID: \"cc8c9706-0165-4cc6-ad22-bc70f03d5bf9\") " pod="swift-kuttl-tests/openstack-galera-2" Jan 29 12:22:28 crc kubenswrapper[4753]: I0129 12:22:28.076885 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/cc8c9706-0165-4cc6-ad22-bc70f03d5bf9-config-data-generated\") pod \"openstack-galera-2\" (UID: \"cc8c9706-0165-4cc6-ad22-bc70f03d5bf9\") " pod="swift-kuttl-tests/openstack-galera-2" Jan 29 12:22:28 crc kubenswrapper[4753]: I0129 12:22:28.077526 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/89e38fc2-9f07-45f0-8cdc-b77931872d7b-config-data-generated\") pod \"openstack-galera-1\" (UID: \"89e38fc2-9f07-45f0-8cdc-b77931872d7b\") " pod="swift-kuttl-tests/openstack-galera-1" Jan 29 12:22:28 crc kubenswrapper[4753]: I0129 12:22:28.077544 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/89e38fc2-9f07-45f0-8cdc-b77931872d7b-config-data-default\") pod \"openstack-galera-1\" (UID: \"89e38fc2-9f07-45f0-8cdc-b77931872d7b\") " pod="swift-kuttl-tests/openstack-galera-1" Jan 29 12:22:28 crc kubenswrapper[4753]: I0129 12:22:28.077647 4753 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-galera-1\" (UID: \"89e38fc2-9f07-45f0-8cdc-b77931872d7b\") device mount path \"/mnt/openstack/pv01\"" pod="swift-kuttl-tests/openstack-galera-1" Jan 29 12:22:28 crc kubenswrapper[4753]: I0129 12:22:28.077726 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/cc8c9706-0165-4cc6-ad22-bc70f03d5bf9-kolla-config\") pod \"openstack-galera-2\" (UID: \"cc8c9706-0165-4cc6-ad22-bc70f03d5bf9\") " pod="swift-kuttl-tests/openstack-galera-2" Jan 29 12:22:28 crc kubenswrapper[4753]: I0129 12:22:28.077921 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/openstack-galera-0" Jan 29 12:22:28 crc kubenswrapper[4753]: I0129 12:22:28.078583 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cc8c9706-0165-4cc6-ad22-bc70f03d5bf9-operator-scripts\") pod \"openstack-galera-2\" (UID: \"cc8c9706-0165-4cc6-ad22-bc70f03d5bf9\") " pod="swift-kuttl-tests/openstack-galera-2" Jan 29 12:22:28 crc kubenswrapper[4753]: I0129 12:22:28.090477 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/89e38fc2-9f07-45f0-8cdc-b77931872d7b-operator-scripts\") pod \"openstack-galera-1\" (UID: \"89e38fc2-9f07-45f0-8cdc-b77931872d7b\") " pod="swift-kuttl-tests/openstack-galera-1" Jan 29 12:22:28 crc kubenswrapper[4753]: I0129 12:22:28.106515 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-galera-2\" (UID: \"cc8c9706-0165-4cc6-ad22-bc70f03d5bf9\") " pod="swift-kuttl-tests/openstack-galera-2" Jan 29 12:22:28 crc kubenswrapper[4753]: I0129 12:22:28.197932 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q4dzf\" (UniqueName: \"kubernetes.io/projected/cc8c9706-0165-4cc6-ad22-bc70f03d5bf9-kube-api-access-q4dzf\") pod \"openstack-galera-2\" (UID: \"cc8c9706-0165-4cc6-ad22-bc70f03d5bf9\") " pod="swift-kuttl-tests/openstack-galera-2" Jan 29 12:22:28 crc kubenswrapper[4753]: I0129 12:22:28.199336 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6zlck\" (UniqueName: \"kubernetes.io/projected/89e38fc2-9f07-45f0-8cdc-b77931872d7b-kube-api-access-6zlck\") pod \"openstack-galera-1\" (UID: \"89e38fc2-9f07-45f0-8cdc-b77931872d7b\") " pod="swift-kuttl-tests/openstack-galera-1" Jan 29 12:22:28 crc kubenswrapper[4753]: I0129 12:22:28.199649 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-galera-1\" (UID: \"89e38fc2-9f07-45f0-8cdc-b77931872d7b\") " pod="swift-kuttl-tests/openstack-galera-1" Jan 29 12:22:28 crc kubenswrapper[4753]: I0129 12:22:28.408881 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/openstack-galera-1" Jan 29 12:22:28 crc kubenswrapper[4753]: E0129 12:22:28.434108 4753 secret.go:188] Couldn't get secret openstack-operators/infra-operator-controller-manager-service-cert: failed to sync secret cache: timed out waiting for the condition Jan 29 12:22:28 crc kubenswrapper[4753]: E0129 12:22:28.434358 4753 secret.go:188] Couldn't get secret openstack-operators/infra-operator-controller-manager-service-cert: failed to sync secret cache: timed out waiting for the condition Jan 29 12:22:28 crc kubenswrapper[4753]: E0129 12:22:28.434391 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf-webhook-cert podName:6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf nodeName:}" failed. No retries permitted until 2026-01-29 12:22:28.934327363 +0000 UTC m=+963.186408808 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "webhook-cert" (UniqueName: "kubernetes.io/secret/6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf-webhook-cert") pod "infra-operator-controller-manager-79c7d496c4-zf52n" (UID: "6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf") : failed to sync secret cache: timed out waiting for the condition Jan 29 12:22:28 crc kubenswrapper[4753]: E0129 12:22:28.434428 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf-apiservice-cert podName:6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf nodeName:}" failed. No retries permitted until 2026-01-29 12:22:28.934411435 +0000 UTC m=+963.186492890 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "apiservice-cert" (UniqueName: "kubernetes.io/secret/6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf-apiservice-cert") pod "infra-operator-controller-manager-79c7d496c4-zf52n" (UID: "6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf") : failed to sync secret cache: timed out waiting for the condition Jan 29 12:22:28 crc kubenswrapper[4753]: I0129 12:22:28.448086 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/openstack-galera-2" Jan 29 12:22:28 crc kubenswrapper[4753]: I0129 12:22:28.678463 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-service-cert" Jan 29 12:22:28 crc kubenswrapper[4753]: I0129 12:22:28.770316 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/openstack-galera-0"] Jan 29 12:22:28 crc kubenswrapper[4753]: I0129 12:22:28.888653 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/openstack-galera-1"] Jan 29 12:22:28 crc kubenswrapper[4753]: I0129 12:22:28.897571 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-hdclg" Jan 29 12:22:28 crc kubenswrapper[4753]: W0129 12:22:28.899407 4753 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod89e38fc2_9f07_45f0_8cdc_b77931872d7b.slice/crio-776f09c496322a7720210fa16c8094598b4a27e6914b1aca53b45825a605ac4d WatchSource:0}: Error finding container 776f09c496322a7720210fa16c8094598b4a27e6914b1aca53b45825a605ac4d: Status 404 returned error can't find the container with id 776f09c496322a7720210fa16c8094598b4a27e6914b1aca53b45825a605ac4d Jan 29 12:22:28 crc kubenswrapper[4753]: I0129 12:22:28.980033 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf-webhook-cert\") pod \"infra-operator-controller-manager-79c7d496c4-zf52n\" (UID: \"6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf\") " pod="openstack-operators/infra-operator-controller-manager-79c7d496c4-zf52n" Jan 29 12:22:28 crc kubenswrapper[4753]: I0129 12:22:28.980128 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf-apiservice-cert\") pod \"infra-operator-controller-manager-79c7d496c4-zf52n\" (UID: \"6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf\") " pod="openstack-operators/infra-operator-controller-manager-79c7d496c4-zf52n" Jan 29 12:22:28 crc kubenswrapper[4753]: I0129 12:22:28.986742 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/openstack-galera-1" 
event={"ID":"89e38fc2-9f07-45f0-8cdc-b77931872d7b","Type":"ContainerStarted","Data":"776f09c496322a7720210fa16c8094598b4a27e6914b1aca53b45825a605ac4d"} Jan 29 12:22:28 crc kubenswrapper[4753]: I0129 12:22:28.988300 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/openstack-galera-0" event={"ID":"6762f214-cfd1-4314-9fa0-1d2b40e87b4e","Type":"ContainerStarted","Data":"c9bd144c9fff972491689a269099cc2a7a35963bbf70b4907d949fc955bce913"} Jan 29 12:22:28 crc kubenswrapper[4753]: I0129 12:22:28.989563 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf-webhook-cert\") pod \"infra-operator-controller-manager-79c7d496c4-zf52n\" (UID: \"6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf\") " pod="openstack-operators/infra-operator-controller-manager-79c7d496c4-zf52n" Jan 29 12:22:28 crc kubenswrapper[4753]: I0129 12:22:28.989992 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf-apiservice-cert\") pod \"infra-operator-controller-manager-79c7d496c4-zf52n\" (UID: \"6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf\") " pod="openstack-operators/infra-operator-controller-manager-79c7d496c4-zf52n" Jan 29 12:22:29 crc kubenswrapper[4753]: I0129 12:22:29.144321 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/openstack-galera-2"] Jan 29 12:22:29 crc kubenswrapper[4753]: I0129 12:22:29.147991 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-79c7d496c4-zf52n" Jan 29 12:22:29 crc kubenswrapper[4753]: I0129 12:22:29.504462 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-79c7d496c4-zf52n"] Jan 29 12:22:29 crc kubenswrapper[4753]: W0129 12:22:29.513413 4753 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6ad1bd9c_bfda_45eb_b89c_0ae519acdfdf.slice/crio-346c6fa4ffb84d933401cb18c1fd1bc5863f538cb25b7def3ec1ae7792f3afe0 WatchSource:0}: Error finding container 346c6fa4ffb84d933401cb18c1fd1bc5863f538cb25b7def3ec1ae7792f3afe0: Status 404 returned error can't find the container with id 346c6fa4ffb84d933401cb18c1fd1bc5863f538cb25b7def3ec1ae7792f3afe0 Jan 29 12:22:30 crc kubenswrapper[4753]: I0129 12:22:30.109361 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-79c7d496c4-zf52n" event={"ID":"6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf","Type":"ContainerStarted","Data":"346c6fa4ffb84d933401cb18c1fd1bc5863f538cb25b7def3ec1ae7792f3afe0"} Jan 29 12:22:30 crc kubenswrapper[4753]: I0129 12:22:30.118144 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/openstack-galera-2" event={"ID":"cc8c9706-0165-4cc6-ad22-bc70f03d5bf9","Type":"ContainerStarted","Data":"e9712456d995f396f56aa44cf6d5fcb616e59dc82ef5ecc85e33b86cb121dcfd"} Jan 29 12:22:45 crc kubenswrapper[4753]: I0129 12:22:45.352309 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-79c7d496c4-zf52n" event={"ID":"6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf","Type":"ContainerStarted","Data":"6ec9bf2fa311e9e2dea6efa58eb61abec5053750793a56fd5c499c2744674742"} Jan 29 12:22:45 crc kubenswrapper[4753]: I0129 12:22:45.353529 4753 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-79c7d496c4-zf52n" Jan 29 12:22:45 crc kubenswrapper[4753]: I0129 12:22:45.382531 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-79c7d496c4-zf52n" podStartSLOduration=3.54320367 podStartE2EDuration="18.382513518s" podCreationTimestamp="2026-01-29 12:22:27 +0000 UTC" firstStartedPulling="2026-01-29 12:22:29.517252051 +0000 UTC m=+963.769333506" lastFinishedPulling="2026-01-29 12:22:44.356561899 +0000 UTC m=+978.608643354" observedRunningTime="2026-01-29 12:22:45.379445609 +0000 UTC m=+979.631527074" watchObservedRunningTime="2026-01-29 12:22:45.382513518 +0000 UTC m=+979.634594973" Jan 29 12:22:45 crc kubenswrapper[4753]: E0129 12:22:45.392397 4753 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13" Jan 29 12:22:45 crc kubenswrapper[4753]: E0129 12:22:45.392695 4753 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[bash /var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-q4dzf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-galera-2_swift-kuttl-tests(cc8c9706-0165-4cc6-ad22-bc70f03d5bf9): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 29 12:22:45 crc kubenswrapper[4753]: E0129 
Jan 29 12:22:45 crc kubenswrapper[4753]: E0129 12:22:45.756321 4753 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13"
Jan 29 12:22:45 crc kubenswrapper[4753]: E0129 12:22:45.756695 4753 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[bash /var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-6zlck,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-galera-1_swift-kuttl-tests(89e38fc2-9f07-45f0-8cdc-b77931872d7b): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 29 12:22:45 crc kubenswrapper[4753]: E0129 12:22:45.757949 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="swift-kuttl-tests/openstack-galera-1" podUID="89e38fc2-9f07-45f0-8cdc-b77931872d7b"
Jan 29 12:22:46 crc kubenswrapper[4753]: E0129 12:22:46.316967 4753 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13"
Jan 29 12:22:46 crc kubenswrapper[4753]: E0129 12:22:46.318300 4753 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[bash /var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qckl8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-galera-0_swift-kuttl-tests(6762f214-cfd1-4314-9fa0-1d2b40e87b4e): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 29 12:22:46 crc kubenswrapper[4753]: E0129 12:22:46.319618 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="swift-kuttl-tests/openstack-galera-0" podUID="6762f214-cfd1-4314-9fa0-1d2b40e87b4e"
Jan 29 12:22:46 crc kubenswrapper[4753]: E0129 12:22:46.361406 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13\\\"\"" pod="swift-kuttl-tests/openstack-galera-1" podUID="89e38fc2-9f07-45f0-8cdc-b77931872d7b"
Jan 29 12:22:46 crc kubenswrapper[4753]: E0129 12:22:46.361601 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13\\\"\"" pod="swift-kuttl-tests/openstack-galera-2" podUID="cc8c9706-0165-4cc6-ad22-bc70f03d5bf9"
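The enormous one-line &Container{Name:mysql-bootstrap,...} blobs in the entries above are not corruption: when a container start fails, the kubelet logs the full init-container spec, and the API type renders itself onto a single line through its stringer. A toy illustration of how such a dump is produced (the type and String method here are illustrative stand-ins, not the real v1.Container):

```go
package main

import "fmt"

// container mimics (a tiny slice of) the pod spec type whose stringer
// produces the "&Container{...}" lines above.
type container struct {
	Name    string
	Image   string
	Command []string
}

// String renders the struct onto one line, field by field, exactly the
// shape seen in the log.
func (c container) String() string {
	return fmt.Sprintf("&Container{Name:%s,Image:%s,Command:%v,}", c.Name, c.Image, c.Command)
}

func main() {
	c := container{
		Name:    "mysql-bootstrap",
		Image:   "quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13",
		Command: []string{"bash", "/var/lib/operator-scripts/mysql_bootstrap.sh"},
	}
	fmt.Println(c) // one long line, like the kuberuntime_manager entries above
}
```

Note also the state change across these entries: the first failed attempt is reported as ErrImagePull, and subsequent sync attempts are rejected with ImagePullBackOff until the pull backoff window expires.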
\"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13\\\"\"" pod="swift-kuttl-tests/openstack-galera-2" podUID="cc8c9706-0165-4cc6-ad22-bc70f03d5bf9" Jan 29 12:22:46 crc kubenswrapper[4753]: E0129 12:22:46.361610 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13\\\"\"" pod="swift-kuttl-tests/openstack-galera-0" podUID="6762f214-cfd1-4314-9fa0-1d2b40e87b4e" Jan 29 12:22:55 crc kubenswrapper[4753]: I0129 12:22:55.546412 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-lkj8l"] Jan 29 12:22:55 crc kubenswrapper[4753]: I0129 12:22:55.549617 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-lkj8l" Jan 29 12:22:55 crc kubenswrapper[4753]: I0129 12:22:55.556131 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-lkj8l"] Jan 29 12:22:55 crc kubenswrapper[4753]: I0129 12:22:55.606170 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f61be96f-9f6c-42c3-a177-afa775bdee56-catalog-content\") pod \"community-operators-lkj8l\" (UID: \"f61be96f-9f6c-42c3-a177-afa775bdee56\") " pod="openshift-marketplace/community-operators-lkj8l" Jan 29 12:22:55 crc kubenswrapper[4753]: I0129 12:22:55.606645 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f61be96f-9f6c-42c3-a177-afa775bdee56-utilities\") pod \"community-operators-lkj8l\" (UID: \"f61be96f-9f6c-42c3-a177-afa775bdee56\") " pod="openshift-marketplace/community-operators-lkj8l" Jan 29 12:22:55 crc kubenswrapper[4753]: I0129 12:22:55.606686 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sk6rk\" (UniqueName: \"kubernetes.io/projected/f61be96f-9f6c-42c3-a177-afa775bdee56-kube-api-access-sk6rk\") pod \"community-operators-lkj8l\" (UID: \"f61be96f-9f6c-42c3-a177-afa775bdee56\") " pod="openshift-marketplace/community-operators-lkj8l" Jan 29 12:22:55 crc kubenswrapper[4753]: I0129 12:22:55.713315 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f61be96f-9f6c-42c3-a177-afa775bdee56-catalog-content\") pod \"community-operators-lkj8l\" (UID: \"f61be96f-9f6c-42c3-a177-afa775bdee56\") " pod="openshift-marketplace/community-operators-lkj8l" Jan 29 12:22:55 crc kubenswrapper[4753]: I0129 12:22:55.713411 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f61be96f-9f6c-42c3-a177-afa775bdee56-utilities\") pod \"community-operators-lkj8l\" (UID: \"f61be96f-9f6c-42c3-a177-afa775bdee56\") " pod="openshift-marketplace/community-operators-lkj8l" Jan 29 12:22:55 crc kubenswrapper[4753]: I0129 12:22:55.713460 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sk6rk\" (UniqueName: 
\"kubernetes.io/projected/f61be96f-9f6c-42c3-a177-afa775bdee56-kube-api-access-sk6rk\") pod \"community-operators-lkj8l\" (UID: \"f61be96f-9f6c-42c3-a177-afa775bdee56\") " pod="openshift-marketplace/community-operators-lkj8l" Jan 29 12:22:55 crc kubenswrapper[4753]: I0129 12:22:55.761781 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f61be96f-9f6c-42c3-a177-afa775bdee56-catalog-content\") pod \"community-operators-lkj8l\" (UID: \"f61be96f-9f6c-42c3-a177-afa775bdee56\") " pod="openshift-marketplace/community-operators-lkj8l" Jan 29 12:22:55 crc kubenswrapper[4753]: I0129 12:22:55.764716 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f61be96f-9f6c-42c3-a177-afa775bdee56-utilities\") pod \"community-operators-lkj8l\" (UID: \"f61be96f-9f6c-42c3-a177-afa775bdee56\") " pod="openshift-marketplace/community-operators-lkj8l" Jan 29 12:22:55 crc kubenswrapper[4753]: I0129 12:22:55.817762 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sk6rk\" (UniqueName: \"kubernetes.io/projected/f61be96f-9f6c-42c3-a177-afa775bdee56-kube-api-access-sk6rk\") pod \"community-operators-lkj8l\" (UID: \"f61be96f-9f6c-42c3-a177-afa775bdee56\") " pod="openshift-marketplace/community-operators-lkj8l" Jan 29 12:22:56 crc kubenswrapper[4753]: I0129 12:22:56.063407 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-lkj8l" Jan 29 12:22:56 crc kubenswrapper[4753]: I0129 12:22:56.715483 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-lkj8l"] Jan 29 12:22:57 crc kubenswrapper[4753]: I0129 12:22:57.604587 4753 generic.go:334] "Generic (PLEG): container finished" podID="f61be96f-9f6c-42c3-a177-afa775bdee56" containerID="68ef35eeea67657d51a98b0e425d794a07e9d5edce4a02ad1b406849d5f62e8f" exitCode=0 Jan 29 12:22:57 crc kubenswrapper[4753]: I0129 12:22:57.604674 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lkj8l" event={"ID":"f61be96f-9f6c-42c3-a177-afa775bdee56","Type":"ContainerDied","Data":"68ef35eeea67657d51a98b0e425d794a07e9d5edce4a02ad1b406849d5f62e8f"} Jan 29 12:22:57 crc kubenswrapper[4753]: I0129 12:22:57.604892 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lkj8l" event={"ID":"f61be96f-9f6c-42c3-a177-afa775bdee56","Type":"ContainerStarted","Data":"7b07076de8fbfbda644a70c1e09865b81f65d3dda7d097dde93bf853361c089d"} Jan 29 12:22:58 crc kubenswrapper[4753]: I0129 12:22:58.612034 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lkj8l" event={"ID":"f61be96f-9f6c-42c3-a177-afa775bdee56","Type":"ContainerStarted","Data":"26653a3054e3b6fb02a21626146208eea029d9dba5bb9f97ec5c1163b2744615"} Jan 29 12:22:59 crc kubenswrapper[4753]: I0129 12:22:59.152930 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-79c7d496c4-zf52n" Jan 29 12:22:59 crc kubenswrapper[4753]: I0129 12:22:59.253443 4753 patch_prober.go:28] interesting pod/machine-config-daemon-7c24x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= 
Jan 29 12:22:59 crc kubenswrapper[4753]: I0129 12:22:59.253590 4753 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 12:22:59 crc kubenswrapper[4753]: I0129 12:22:59.624747 4753 generic.go:334] "Generic (PLEG): container finished" podID="f61be96f-9f6c-42c3-a177-afa775bdee56" containerID="26653a3054e3b6fb02a21626146208eea029d9dba5bb9f97ec5c1163b2744615" exitCode=0
Jan 29 12:22:59 crc kubenswrapper[4753]: I0129 12:22:59.624836 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lkj8l" event={"ID":"f61be96f-9f6c-42c3-a177-afa775bdee56","Type":"ContainerDied","Data":"26653a3054e3b6fb02a21626146208eea029d9dba5bb9f97ec5c1163b2744615"}
Jan 29 12:23:00 crc kubenswrapper[4753]: I0129 12:23:00.644214 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/openstack-galera-0" event={"ID":"6762f214-cfd1-4314-9fa0-1d2b40e87b4e","Type":"ContainerStarted","Data":"986ff07fe034f7d5e0217ddc0a81108f405dcdf609bc954e6240eac23f6231c0"}
Jan 29 12:23:00 crc kubenswrapper[4753]: I0129 12:23:00.648870 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/openstack-galera-1" event={"ID":"89e38fc2-9f07-45f0-8cdc-b77931872d7b","Type":"ContainerStarted","Data":"4801ace0c26402ef4218df8ec6892a76c6d150ad4031dff6e3e9e00bf547217d"}
Jan 29 12:23:00 crc kubenswrapper[4753]: I0129 12:23:00.656424 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lkj8l" event={"ID":"f61be96f-9f6c-42c3-a177-afa775bdee56","Type":"ContainerStarted","Data":"9cffd21ffcaf4fa14d778002e6bc280011ec737e7f0f1dbac08e912221640025"}
Jan 29 12:23:00 crc kubenswrapper[4753]: I0129 12:23:00.718353 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-lkj8l" podStartSLOduration=3.192997686 podStartE2EDuration="5.718310785s" podCreationTimestamp="2026-01-29 12:22:55 +0000 UTC" firstStartedPulling="2026-01-29 12:22:57.606559404 +0000 UTC m=+991.858640859" lastFinishedPulling="2026-01-29 12:23:00.131872503 +0000 UTC m=+994.383953958" observedRunningTime="2026-01-29 12:23:00.712032913 +0000 UTC m=+994.964114368" watchObservedRunningTime="2026-01-29 12:23:00.718310785 +0000 UTC m=+994.970392230"
Jan 29 12:23:01 crc kubenswrapper[4753]: I0129 12:23:01.665878 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/openstack-galera-2" event={"ID":"cc8c9706-0165-4cc6-ad22-bc70f03d5bf9","Type":"ContainerStarted","Data":"9e7761deaabf5405004d0a17c8a33805dd7077c52d15bba22c2ae2e917b92fc0"}
Jan 29 12:23:03 crc kubenswrapper[4753]: I0129 12:23:03.229385 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/memcached-0"]
Jan 29 12:23:03 crc kubenswrapper[4753]: I0129 12:23:03.230847 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/memcached-0"
Jan 29 12:23:03 crc kubenswrapper[4753]: I0129 12:23:03.236333 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"swift-kuttl-tests"/"memcached-config-data"
Jan 29 12:23:03 crc kubenswrapper[4753]: I0129 12:23:03.236378 4753 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"memcached-memcached-dockercfg-mg8v2"
Jan 29 12:23:03 crc kubenswrapper[4753]: I0129 12:23:03.245801 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/memcached-0"]
Jan 29 12:23:03 crc kubenswrapper[4753]: I0129 12:23:03.283324 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/bd1f7149-df12-4c02-9585-238bfd26f12d-kolla-config\") pod \"memcached-0\" (UID: \"bd1f7149-df12-4c02-9585-238bfd26f12d\") " pod="swift-kuttl-tests/memcached-0"
Jan 29 12:23:03 crc kubenswrapper[4753]: I0129 12:23:03.283385 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/bd1f7149-df12-4c02-9585-238bfd26f12d-config-data\") pod \"memcached-0\" (UID: \"bd1f7149-df12-4c02-9585-238bfd26f12d\") " pod="swift-kuttl-tests/memcached-0"
Jan 29 12:23:03 crc kubenswrapper[4753]: I0129 12:23:03.283475 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zfhlb\" (UniqueName: \"kubernetes.io/projected/bd1f7149-df12-4c02-9585-238bfd26f12d-kube-api-access-zfhlb\") pod \"memcached-0\" (UID: \"bd1f7149-df12-4c02-9585-238bfd26f12d\") " pod="swift-kuttl-tests/memcached-0"
Jan 29 12:23:03 crc kubenswrapper[4753]: I0129 12:23:03.384792 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zfhlb\" (UniqueName: \"kubernetes.io/projected/bd1f7149-df12-4c02-9585-238bfd26f12d-kube-api-access-zfhlb\") pod \"memcached-0\" (UID: \"bd1f7149-df12-4c02-9585-238bfd26f12d\") " pod="swift-kuttl-tests/memcached-0"
Jan 29 12:23:03 crc kubenswrapper[4753]: I0129 12:23:03.384962 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/bd1f7149-df12-4c02-9585-238bfd26f12d-kolla-config\") pod \"memcached-0\" (UID: \"bd1f7149-df12-4c02-9585-238bfd26f12d\") " pod="swift-kuttl-tests/memcached-0"
Jan 29 12:23:03 crc kubenswrapper[4753]: I0129 12:23:03.385005 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/bd1f7149-df12-4c02-9585-238bfd26f12d-config-data\") pod \"memcached-0\" (UID: \"bd1f7149-df12-4c02-9585-238bfd26f12d\") " pod="swift-kuttl-tests/memcached-0"
Jan 29 12:23:03 crc kubenswrapper[4753]: I0129 12:23:03.386037 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/bd1f7149-df12-4c02-9585-238bfd26f12d-kolla-config\") pod \"memcached-0\" (UID: \"bd1f7149-df12-4c02-9585-238bfd26f12d\") " pod="swift-kuttl-tests/memcached-0"
Jan 29 12:23:03 crc kubenswrapper[4753]: I0129 12:23:03.386527 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/bd1f7149-df12-4c02-9585-238bfd26f12d-config-data\") pod \"memcached-0\" (UID: \"bd1f7149-df12-4c02-9585-238bfd26f12d\") " pod="swift-kuttl-tests/memcached-0"
Jan 29 12:23:03 crc kubenswrapper[4753]: I0129 12:23:03.406335 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zfhlb\" (UniqueName: \"kubernetes.io/projected/bd1f7149-df12-4c02-9585-238bfd26f12d-kube-api-access-zfhlb\") pod \"memcached-0\" (UID: \"bd1f7149-df12-4c02-9585-238bfd26f12d\") " pod="swift-kuttl-tests/memcached-0"
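The VerifyControllerAttachedVolume -> "MountVolume started" -> "MountVolume.SetUp succeeded" runs above (here for memcached-0's three volumes) are one reconciliation pass: the kubelet compares the pod's desired volume set with what is actually mounted and issues mounts for the difference, retrying failures on later passes. A minimal sketch of that loop, with hypothetical names and state shape:

```go
package main

import "fmt"

// reconcile mounts every desired volume that is not yet in the actual
// state, recording successes so later passes skip them.
func reconcile(desired []string, actual map[string]bool, mount func(string) error) {
	for _, vol := range desired {
		if actual[vol] {
			continue // already mounted, nothing to do
		}
		if err := mount(vol); err != nil {
			fmt.Println("MountVolume.SetUp failed, will retry:", vol, err)
			continue // left in desired state; a later pass retries
		}
		actual[vol] = true
		fmt.Println("MountVolume.SetUp succeeded for volume", vol)
	}
}

func main() {
	desired := []string{"kolla-config", "config-data", "kube-api-access-zfhlb"}
	reconcile(desired, map[string]bool{}, func(string) error { return nil })
}
```

The earlier webhook-cert/apiservice-cert failures and eventual successes are exactly this loop observed across two passes.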
Jan 29 12:23:03 crc kubenswrapper[4753]: I0129 12:23:03.552889 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/memcached-0"
Jan 29 12:23:03 crc kubenswrapper[4753]: I0129 12:23:03.839869 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/memcached-0"]
Jan 29 12:23:04 crc kubenswrapper[4753]: I0129 12:23:04.689255 4753 generic.go:334] "Generic (PLEG): container finished" podID="6762f214-cfd1-4314-9fa0-1d2b40e87b4e" containerID="986ff07fe034f7d5e0217ddc0a81108f405dcdf609bc954e6240eac23f6231c0" exitCode=0
Jan 29 12:23:04 crc kubenswrapper[4753]: I0129 12:23:04.689355 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/openstack-galera-0" event={"ID":"6762f214-cfd1-4314-9fa0-1d2b40e87b4e","Type":"ContainerDied","Data":"986ff07fe034f7d5e0217ddc0a81108f405dcdf609bc954e6240eac23f6231c0"}
Jan 29 12:23:04 crc kubenswrapper[4753]: I0129 12:23:04.692095 4753 generic.go:334] "Generic (PLEG): container finished" podID="89e38fc2-9f07-45f0-8cdc-b77931872d7b" containerID="4801ace0c26402ef4218df8ec6892a76c6d150ad4031dff6e3e9e00bf547217d" exitCode=0
Jan 29 12:23:04 crc kubenswrapper[4753]: I0129 12:23:04.692141 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/openstack-galera-1" event={"ID":"89e38fc2-9f07-45f0-8cdc-b77931872d7b","Type":"ContainerDied","Data":"4801ace0c26402ef4218df8ec6892a76c6d150ad4031dff6e3e9e00bf547217d"}
Jan 29 12:23:04 crc kubenswrapper[4753]: I0129 12:23:04.693271 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/memcached-0" event={"ID":"bd1f7149-df12-4c02-9585-238bfd26f12d","Type":"ContainerStarted","Data":"45784504d0eb5f699a70326b32b71dc770138ec4ab5273af9162b72ee4adf912"}
Jan 29 12:23:05 crc kubenswrapper[4753]: I0129 12:23:05.712516 4753 generic.go:334] "Generic (PLEG): container finished" podID="cc8c9706-0165-4cc6-ad22-bc70f03d5bf9" containerID="9e7761deaabf5405004d0a17c8a33805dd7077c52d15bba22c2ae2e917b92fc0" exitCode=0
Jan 29 12:23:05 crc kubenswrapper[4753]: I0129 12:23:05.712806 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/openstack-galera-2" event={"ID":"cc8c9706-0165-4cc6-ad22-bc70f03d5bf9","Type":"ContainerDied","Data":"9e7761deaabf5405004d0a17c8a33805dd7077c52d15bba22c2ae2e917b92fc0"}
Jan 29 12:23:06 crc kubenswrapper[4753]: I0129 12:23:06.063688 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-lkj8l"
Jan 29 12:23:06 crc kubenswrapper[4753]: I0129 12:23:06.063773 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-lkj8l"
Jan 29 12:23:06 crc kubenswrapper[4753]: I0129 12:23:06.113250 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-lkj8l"
Jan 29 12:23:06 crc kubenswrapper[4753]: I0129 12:23:06.343460 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-index-w4plk"]
Jan 29 12:23:06 crc kubenswrapper[4753]: I0129 12:23:06.344447 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-index-w4plk"
Jan 29 12:23:06 crc kubenswrapper[4753]: I0129 12:23:06.349299 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-index-dockercfg-8m4h9"
Jan 29 12:23:06 crc kubenswrapper[4753]: I0129 12:23:06.349710 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-index-w4plk"]
Jan 29 12:23:06 crc kubenswrapper[4753]: I0129 12:23:06.501189 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2clp2\" (UniqueName: \"kubernetes.io/projected/d629e968-732d-4000-9ad9-16dceefa3077-kube-api-access-2clp2\") pod \"rabbitmq-cluster-operator-index-w4plk\" (UID: \"d629e968-732d-4000-9ad9-16dceefa3077\") " pod="openstack-operators/rabbitmq-cluster-operator-index-w4plk"
Jan 29 12:23:06 crc kubenswrapper[4753]: I0129 12:23:06.603095 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2clp2\" (UniqueName: \"kubernetes.io/projected/d629e968-732d-4000-9ad9-16dceefa3077-kube-api-access-2clp2\") pod \"rabbitmq-cluster-operator-index-w4plk\" (UID: \"d629e968-732d-4000-9ad9-16dceefa3077\") " pod="openstack-operators/rabbitmq-cluster-operator-index-w4plk"
Jan 29 12:23:06 crc kubenswrapper[4753]: I0129 12:23:06.625689 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2clp2\" (UniqueName: \"kubernetes.io/projected/d629e968-732d-4000-9ad9-16dceefa3077-kube-api-access-2clp2\") pod \"rabbitmq-cluster-operator-index-w4plk\" (UID: \"d629e968-732d-4000-9ad9-16dceefa3077\") " pod="openstack-operators/rabbitmq-cluster-operator-index-w4plk"
Jan 29 12:23:06 crc kubenswrapper[4753]: I0129 12:23:06.668581 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-index-w4plk"
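Each kube-api-access-* volume above (zfhlb, 2clp2, and the others) is a projected service-account volume: the token, the cluster CA bundle, and the namespace file are combined under one mount at the standard path. From inside the container they are just three files; a sketch of a consumer:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// Standard mount point for the projected service-account volume,
	// visible in the MountPath of the container specs logged earlier.
	base := "/var/run/secrets/kubernetes.io/serviceaccount"
	for _, name := range []string{"token", "ca.crt", "namespace"} {
		data, err := os.ReadFile(filepath.Join(base, name))
		if err != nil {
			fmt.Println("not running inside a pod?", err)
			return
		}
		fmt.Printf("%s: %d bytes\n", name, len(data))
	}
}
```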
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-index-w4plk" Jan 29 12:23:06 crc kubenswrapper[4753]: I0129 12:23:06.727019 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/openstack-galera-1" event={"ID":"89e38fc2-9f07-45f0-8cdc-b77931872d7b","Type":"ContainerStarted","Data":"fd2d4883cbb5e061192a0055cb839eaec1e2e1653c1b4339edbd2330756bbef4"} Jan 29 12:23:06 crc kubenswrapper[4753]: I0129 12:23:06.733261 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/openstack-galera-2" event={"ID":"cc8c9706-0165-4cc6-ad22-bc70f03d5bf9","Type":"ContainerStarted","Data":"a5d51765dc77046a849f5638d73d0b342a8786d872c3f6feb3a67d55f33d5ea1"} Jan 29 12:23:06 crc kubenswrapper[4753]: I0129 12:23:06.737967 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/openstack-galera-0" event={"ID":"6762f214-cfd1-4314-9fa0-1d2b40e87b4e","Type":"ContainerStarted","Data":"955c359e87ea643dd73d843cc3480910edf3ef440c787c82d01331b918f7cb58"} Jan 29 12:23:06 crc kubenswrapper[4753]: I0129 12:23:06.764477 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="swift-kuttl-tests/openstack-galera-1" podStartSLOduration=-9223371996.090347 podStartE2EDuration="40.764428705s" podCreationTimestamp="2026-01-29 12:22:26 +0000 UTC" firstStartedPulling="2026-01-29 12:22:28.903032101 +0000 UTC m=+963.155113556" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:23:06.756913647 +0000 UTC m=+1001.008995102" watchObservedRunningTime="2026-01-29 12:23:06.764428705 +0000 UTC m=+1001.016510190" Jan 29 12:23:06 crc kubenswrapper[4753]: I0129 12:23:06.804072 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-lkj8l" Jan 29 12:23:06 crc kubenswrapper[4753]: I0129 12:23:06.832206 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="swift-kuttl-tests/openstack-galera-2" podStartSLOduration=-9223371996.022598 podStartE2EDuration="40.832176918s" podCreationTimestamp="2026-01-29 12:22:26 +0000 UTC" firstStartedPulling="2026-01-29 12:22:29.150511052 +0000 UTC m=+963.402592507" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:23:06.803087681 +0000 UTC m=+1001.055169146" watchObservedRunningTime="2026-01-29 12:23:06.832176918 +0000 UTC m=+1001.084258373" Jan 29 12:23:06 crc kubenswrapper[4753]: I0129 12:23:06.850629 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="swift-kuttl-tests/openstack-galera-0" podStartSLOduration=10.228597704 podStartE2EDuration="40.850604624s" podCreationTimestamp="2026-01-29 12:22:26 +0000 UTC" firstStartedPulling="2026-01-29 12:22:28.793292312 +0000 UTC m=+963.045373767" lastFinishedPulling="2026-01-29 12:22:59.415299232 +0000 UTC m=+993.667380687" observedRunningTime="2026-01-29 12:23:06.830500209 +0000 UTC m=+1001.082581674" watchObservedRunningTime="2026-01-29 12:23:06.850604624 +0000 UTC m=+1001.102686079" Jan 29 12:23:08 crc kubenswrapper[4753]: I0129 12:23:08.095807 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="swift-kuttl-tests/openstack-galera-0" Jan 29 12:23:08 crc kubenswrapper[4753]: I0129 12:23:08.096133 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="swift-kuttl-tests/openstack-galera-0" Jan 29 12:23:08 crc kubenswrapper[4753]: I0129 12:23:08.491995 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="swift-kuttl-tests/openstack-galera-1" Jan 29 12:23:08 crc kubenswrapper[4753]: I0129 12:23:08.494858 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="swift-kuttl-tests/openstack-galera-1" Jan 29 12:23:08 crc kubenswrapper[4753]: I0129 12:23:08.495317 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="swift-kuttl-tests/openstack-galera-2" Jan 29 12:23:08 crc kubenswrapper[4753]: I0129 12:23:08.495874 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="swift-kuttl-tests/openstack-galera-2" Jan 29 12:23:08 crc kubenswrapper[4753]: I0129 12:23:08.622894 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-index-w4plk"] Jan 29 12:23:08 crc kubenswrapper[4753]: I0129 12:23:08.900947 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-index-w4plk" event={"ID":"d629e968-732d-4000-9ad9-16dceefa3077","Type":"ContainerStarted","Data":"5e7d5d189fdd47d22157692c38747b67faa0c47a4ee722ffb29a075ec326403b"} Jan 29 12:23:09 crc kubenswrapper[4753]: I0129 12:23:09.322159 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-r9bb2"] Jan 29 12:23:09 crc kubenswrapper[4753]: I0129 12:23:09.326274 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-r9bb2" Jan 29 12:23:09 crc kubenswrapper[4753]: I0129 12:23:09.396878 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-r9bb2"] Jan 29 12:23:09 crc kubenswrapper[4753]: I0129 12:23:09.400071 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0270e702-8bd1-48e1-9356-e59def1ddd0a-utilities\") pod \"certified-operators-r9bb2\" (UID: \"0270e702-8bd1-48e1-9356-e59def1ddd0a\") " pod="openshift-marketplace/certified-operators-r9bb2" Jan 29 12:23:09 crc kubenswrapper[4753]: I0129 12:23:09.400130 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0270e702-8bd1-48e1-9356-e59def1ddd0a-catalog-content\") pod \"certified-operators-r9bb2\" (UID: \"0270e702-8bd1-48e1-9356-e59def1ddd0a\") " pod="openshift-marketplace/certified-operators-r9bb2" Jan 29 12:23:09 crc kubenswrapper[4753]: I0129 12:23:09.400240 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wnm7d\" (UniqueName: \"kubernetes.io/projected/0270e702-8bd1-48e1-9356-e59def1ddd0a-kube-api-access-wnm7d\") pod \"certified-operators-r9bb2\" (UID: \"0270e702-8bd1-48e1-9356-e59def1ddd0a\") " pod="openshift-marketplace/certified-operators-r9bb2" Jan 29 12:23:09 crc kubenswrapper[4753]: I0129 12:23:09.501442 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wnm7d\" (UniqueName: \"kubernetes.io/projected/0270e702-8bd1-48e1-9356-e59def1ddd0a-kube-api-access-wnm7d\") pod \"certified-operators-r9bb2\" (UID: \"0270e702-8bd1-48e1-9356-e59def1ddd0a\") " pod="openshift-marketplace/certified-operators-r9bb2" Jan 29 12:23:09 crc kubenswrapper[4753]: I0129 12:23:09.501547 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0270e702-8bd1-48e1-9356-e59def1ddd0a-utilities\") pod 
\"certified-operators-r9bb2\" (UID: \"0270e702-8bd1-48e1-9356-e59def1ddd0a\") " pod="openshift-marketplace/certified-operators-r9bb2" Jan 29 12:23:09 crc kubenswrapper[4753]: I0129 12:23:09.501577 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0270e702-8bd1-48e1-9356-e59def1ddd0a-catalog-content\") pod \"certified-operators-r9bb2\" (UID: \"0270e702-8bd1-48e1-9356-e59def1ddd0a\") " pod="openshift-marketplace/certified-operators-r9bb2" Jan 29 12:23:09 crc kubenswrapper[4753]: I0129 12:23:09.502244 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0270e702-8bd1-48e1-9356-e59def1ddd0a-catalog-content\") pod \"certified-operators-r9bb2\" (UID: \"0270e702-8bd1-48e1-9356-e59def1ddd0a\") " pod="openshift-marketplace/certified-operators-r9bb2" Jan 29 12:23:09 crc kubenswrapper[4753]: I0129 12:23:09.502951 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0270e702-8bd1-48e1-9356-e59def1ddd0a-utilities\") pod \"certified-operators-r9bb2\" (UID: \"0270e702-8bd1-48e1-9356-e59def1ddd0a\") " pod="openshift-marketplace/certified-operators-r9bb2" Jan 29 12:23:09 crc kubenswrapper[4753]: I0129 12:23:09.532615 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wnm7d\" (UniqueName: \"kubernetes.io/projected/0270e702-8bd1-48e1-9356-e59def1ddd0a-kube-api-access-wnm7d\") pod \"certified-operators-r9bb2\" (UID: \"0270e702-8bd1-48e1-9356-e59def1ddd0a\") " pod="openshift-marketplace/certified-operators-r9bb2" Jan 29 12:23:09 crc kubenswrapper[4753]: I0129 12:23:09.723027 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-r9bb2" Jan 29 12:23:09 crc kubenswrapper[4753]: I0129 12:23:09.918048 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/memcached-0" event={"ID":"bd1f7149-df12-4c02-9585-238bfd26f12d","Type":"ContainerStarted","Data":"7e2563e7ba256c98d58bb645a318e00f67fdfe4d6d9a7c0aad6f2e2e9bb5165c"} Jan 29 12:23:09 crc kubenswrapper[4753]: I0129 12:23:09.918407 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="swift-kuttl-tests/memcached-0" Jan 29 12:23:09 crc kubenswrapper[4753]: I0129 12:23:09.947918 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="swift-kuttl-tests/memcached-0" podStartSLOduration=3.099101517 podStartE2EDuration="6.947893865s" podCreationTimestamp="2026-01-29 12:23:03 +0000 UTC" firstStartedPulling="2026-01-29 12:23:03.907723669 +0000 UTC m=+998.159805114" lastFinishedPulling="2026-01-29 12:23:07.756516007 +0000 UTC m=+1002.008597462" observedRunningTime="2026-01-29 12:23:09.94771895 +0000 UTC m=+1004.199800415" watchObservedRunningTime="2026-01-29 12:23:09.947893865 +0000 UTC m=+1004.199975320" Jan 29 12:23:10 crc kubenswrapper[4753]: I0129 12:23:10.542257 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-r9bb2"] Jan 29 12:23:10 crc kubenswrapper[4753]: W0129 12:23:10.555797 4753 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0270e702_8bd1_48e1_9356_e59def1ddd0a.slice/crio-f0adb5dd82c505c6e0b8203ac0e35584573c351c2ea3cfbcd2a7eb10050ab469 WatchSource:0}: Error finding container f0adb5dd82c505c6e0b8203ac0e35584573c351c2ea3cfbcd2a7eb10050ab469: Status 404 returned error can't find the container with id f0adb5dd82c505c6e0b8203ac0e35584573c351c2ea3cfbcd2a7eb10050ab469 Jan 29 12:23:11 crc kubenswrapper[4753]: I0129 12:23:11.082640 4753 generic.go:334] "Generic (PLEG): container finished" podID="0270e702-8bd1-48e1-9356-e59def1ddd0a" containerID="5ff164143eec13d0d475cbdf3c961d5023d182e1d5a822c99468503cac048192" exitCode=0 Jan 29 12:23:11 crc kubenswrapper[4753]: I0129 12:23:11.084292 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r9bb2" event={"ID":"0270e702-8bd1-48e1-9356-e59def1ddd0a","Type":"ContainerDied","Data":"5ff164143eec13d0d475cbdf3c961d5023d182e1d5a822c99468503cac048192"} Jan 29 12:23:11 crc kubenswrapper[4753]: I0129 12:23:11.084421 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r9bb2" event={"ID":"0270e702-8bd1-48e1-9356-e59def1ddd0a","Type":"ContainerStarted","Data":"f0adb5dd82c505c6e0b8203ac0e35584573c351c2ea3cfbcd2a7eb10050ab469"} Jan 29 12:23:12 crc kubenswrapper[4753]: I0129 12:23:12.122700 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-lkj8l"] Jan 29 12:23:12 crc kubenswrapper[4753]: I0129 12:23:12.123452 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-lkj8l" podUID="f61be96f-9f6c-42c3-a177-afa775bdee56" containerName="registry-server" containerID="cri-o://9cffd21ffcaf4fa14d778002e6bc280011ec737e7f0f1dbac08e912221640025" gracePeriod=2 Jan 29 12:23:13 crc kubenswrapper[4753]: I0129 12:23:13.377776 4753 generic.go:334] "Generic (PLEG): container finished" podID="f61be96f-9f6c-42c3-a177-afa775bdee56" 
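"Killing container with a grace period ... gracePeriod=2" above follows the standard termination contract: the runtime sends SIGTERM, gives the process up to gracePeriod seconds to exit, then force-kills it with SIGKILL. A sketch with a plain process standing in for the container (Unix-only; the real path goes through CRI-O, as the cri-o:// containerID indicates):

```go
package main

import (
	"fmt"
	"os/exec"
	"syscall"
	"time"
)

// killWithGrace implements SIGTERM-then-SIGKILL with a bounded wait.
func killWithGrace(cmd *exec.Cmd, grace time.Duration) {
	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()

	cmd.Process.Signal(syscall.SIGTERM) // polite request to shut down
	select {
	case <-done:
		fmt.Println("exited within grace period")
	case <-time.After(grace):
		cmd.Process.Kill() // SIGKILL after the grace period elapses
		<-done
		fmt.Println("force-killed after grace period")
	}
}

func main() {
	cmd := exec.Command("sleep", "60")
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	killWithGrace(cmd, 2*time.Second)
}
```

Here the registry-server exits in time: the "container finished ... exitCode=0" entry that follows arrives about a second after the kill.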
containerID="9cffd21ffcaf4fa14d778002e6bc280011ec737e7f0f1dbac08e912221640025" exitCode=0 Jan 29 12:23:13 crc kubenswrapper[4753]: I0129 12:23:13.377830 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lkj8l" event={"ID":"f61be96f-9f6c-42c3-a177-afa775bdee56","Type":"ContainerDied","Data":"9cffd21ffcaf4fa14d778002e6bc280011ec737e7f0f1dbac08e912221640025"} Jan 29 12:23:13 crc kubenswrapper[4753]: I0129 12:23:13.554351 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="swift-kuttl-tests/memcached-0" Jan 29 12:23:14 crc kubenswrapper[4753]: I0129 12:23:14.521785 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r9bb2" event={"ID":"0270e702-8bd1-48e1-9356-e59def1ddd0a","Type":"ContainerStarted","Data":"0a999ce8113967ea24103ad7952557242de710a19d3f65218b45f28152d294d0"} Jan 29 12:23:14 crc kubenswrapper[4753]: I0129 12:23:14.947063 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-lkj8l" Jan 29 12:23:15 crc kubenswrapper[4753]: I0129 12:23:15.123751 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sk6rk\" (UniqueName: \"kubernetes.io/projected/f61be96f-9f6c-42c3-a177-afa775bdee56-kube-api-access-sk6rk\") pod \"f61be96f-9f6c-42c3-a177-afa775bdee56\" (UID: \"f61be96f-9f6c-42c3-a177-afa775bdee56\") " Jan 29 12:23:15 crc kubenswrapper[4753]: I0129 12:23:15.123958 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f61be96f-9f6c-42c3-a177-afa775bdee56-catalog-content\") pod \"f61be96f-9f6c-42c3-a177-afa775bdee56\" (UID: \"f61be96f-9f6c-42c3-a177-afa775bdee56\") " Jan 29 12:23:15 crc kubenswrapper[4753]: I0129 12:23:15.123998 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f61be96f-9f6c-42c3-a177-afa775bdee56-utilities\") pod \"f61be96f-9f6c-42c3-a177-afa775bdee56\" (UID: \"f61be96f-9f6c-42c3-a177-afa775bdee56\") " Jan 29 12:23:15 crc kubenswrapper[4753]: I0129 12:23:15.126026 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f61be96f-9f6c-42c3-a177-afa775bdee56-utilities" (OuterVolumeSpecName: "utilities") pod "f61be96f-9f6c-42c3-a177-afa775bdee56" (UID: "f61be96f-9f6c-42c3-a177-afa775bdee56"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:23:15 crc kubenswrapper[4753]: I0129 12:23:15.149647 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f61be96f-9f6c-42c3-a177-afa775bdee56-kube-api-access-sk6rk" (OuterVolumeSpecName: "kube-api-access-sk6rk") pod "f61be96f-9f6c-42c3-a177-afa775bdee56" (UID: "f61be96f-9f6c-42c3-a177-afa775bdee56"). InnerVolumeSpecName "kube-api-access-sk6rk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:23:15 crc kubenswrapper[4753]: I0129 12:23:15.197492 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f61be96f-9f6c-42c3-a177-afa775bdee56-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f61be96f-9f6c-42c3-a177-afa775bdee56" (UID: "f61be96f-9f6c-42c3-a177-afa775bdee56"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:23:15 crc kubenswrapper[4753]: I0129 12:23:15.225448 4753 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f61be96f-9f6c-42c3-a177-afa775bdee56-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 12:23:15 crc kubenswrapper[4753]: I0129 12:23:15.225496 4753 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f61be96f-9f6c-42c3-a177-afa775bdee56-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 12:23:15 crc kubenswrapper[4753]: I0129 12:23:15.225508 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sk6rk\" (UniqueName: \"kubernetes.io/projected/f61be96f-9f6c-42c3-a177-afa775bdee56-kube-api-access-sk6rk\") on node \"crc\" DevicePath \"\"" Jan 29 12:23:15 crc kubenswrapper[4753]: I0129 12:23:15.538673 4753 generic.go:334] "Generic (PLEG): container finished" podID="0270e702-8bd1-48e1-9356-e59def1ddd0a" containerID="0a999ce8113967ea24103ad7952557242de710a19d3f65218b45f28152d294d0" exitCode=0 Jan 29 12:23:15 crc kubenswrapper[4753]: I0129 12:23:15.538926 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r9bb2" event={"ID":"0270e702-8bd1-48e1-9356-e59def1ddd0a","Type":"ContainerDied","Data":"0a999ce8113967ea24103ad7952557242de710a19d3f65218b45f28152d294d0"} Jan 29 12:23:15 crc kubenswrapper[4753]: I0129 12:23:15.546644 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lkj8l" event={"ID":"f61be96f-9f6c-42c3-a177-afa775bdee56","Type":"ContainerDied","Data":"7b07076de8fbfbda644a70c1e09865b81f65d3dda7d097dde93bf853361c089d"} Jan 29 12:23:15 crc kubenswrapper[4753]: I0129 12:23:15.546724 4753 scope.go:117] "RemoveContainer" containerID="9cffd21ffcaf4fa14d778002e6bc280011ec737e7f0f1dbac08e912221640025" Jan 29 12:23:15 crc kubenswrapper[4753]: I0129 12:23:15.546883 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-lkj8l" Jan 29 12:23:15 crc kubenswrapper[4753]: I0129 12:23:15.586297 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-lkj8l"] Jan 29 12:23:15 crc kubenswrapper[4753]: I0129 12:23:15.594202 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-lkj8l"] Jan 29 12:23:15 crc kubenswrapper[4753]: I0129 12:23:15.651710 4753 scope.go:117] "RemoveContainer" containerID="26653a3054e3b6fb02a21626146208eea029d9dba5bb9f97ec5c1163b2744615" Jan 29 12:23:15 crc kubenswrapper[4753]: I0129 12:23:15.907080 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f61be96f-9f6c-42c3-a177-afa775bdee56" path="/var/lib/kubelet/pods/f61be96f-9f6c-42c3-a177-afa775bdee56/volumes" Jan 29 12:23:15 crc kubenswrapper[4753]: I0129 12:23:15.962477 4753 scope.go:117] "RemoveContainer" containerID="68ef35eeea67657d51a98b0e425d794a07e9d5edce4a02ad1b406849d5f62e8f" Jan 29 12:23:18 crc kubenswrapper[4753]: I0129 12:23:18.694826 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r9bb2" event={"ID":"0270e702-8bd1-48e1-9356-e59def1ddd0a","Type":"ContainerStarted","Data":"dae613d6299584dfa399fae9791b24fd98b1a763ce95ba9a3efabe6645b5e99c"} Jan 29 12:23:18 crc kubenswrapper[4753]: I0129 12:23:18.701472 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-index-w4plk" event={"ID":"d629e968-732d-4000-9ad9-16dceefa3077","Type":"ContainerStarted","Data":"82330e92e4c837f3d22485e432e105e98fe5e3d0a05c7f6ff321880d34cef51c"} Jan 29 12:23:18 crc kubenswrapper[4753]: I0129 12:23:18.746946 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-r9bb2" podStartSLOduration=3.042692241 podStartE2EDuration="9.74692419s" podCreationTimestamp="2026-01-29 12:23:09 +0000 UTC" firstStartedPulling="2026-01-29 12:23:11.091401348 +0000 UTC m=+1005.343482803" lastFinishedPulling="2026-01-29 12:23:17.795633297 +0000 UTC m=+1012.047714752" observedRunningTime="2026-01-29 12:23:18.742583514 +0000 UTC m=+1012.994664959" watchObservedRunningTime="2026-01-29 12:23:18.74692419 +0000 UTC m=+1012.999005645" Jan 29 12:23:18 crc kubenswrapper[4753]: I0129 12:23:18.762566 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-index-w4plk" podStartSLOduration=3.622737787 podStartE2EDuration="12.762546205s" podCreationTimestamp="2026-01-29 12:23:06 +0000 UTC" firstStartedPulling="2026-01-29 12:23:08.655640623 +0000 UTC m=+1002.907722078" lastFinishedPulling="2026-01-29 12:23:17.795449041 +0000 UTC m=+1012.047530496" observedRunningTime="2026-01-29 12:23:18.760912938 +0000 UTC m=+1013.012994413" watchObservedRunningTime="2026-01-29 12:23:18.762546205 +0000 UTC m=+1013.014627660" Jan 29 12:23:19 crc kubenswrapper[4753]: I0129 12:23:19.537567 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="swift-kuttl-tests/openstack-galera-2" Jan 29 12:23:19 crc kubenswrapper[4753]: I0129 12:23:19.608744 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="swift-kuttl-tests/openstack-galera-2" Jan 29 12:23:19 crc kubenswrapper[4753]: I0129 12:23:19.893203 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-r9bb2" Jan 29 12:23:19 crc 
kubenswrapper[4753]: I0129 12:23:19.893756 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-r9bb2" Jan 29 12:23:19 crc kubenswrapper[4753]: I0129 12:23:19.956499 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-r9bb2" Jan 29 12:23:22 crc kubenswrapper[4753]: I0129 12:23:22.234815 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-r9bb2" Jan 29 12:23:24 crc kubenswrapper[4753]: I0129 12:23:24.920764 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-r9bb2"] Jan 29 12:23:24 crc kubenswrapper[4753]: I0129 12:23:24.921250 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-r9bb2" podUID="0270e702-8bd1-48e1-9356-e59def1ddd0a" containerName="registry-server" containerID="cri-o://dae613d6299584dfa399fae9791b24fd98b1a763ce95ba9a3efabe6645b5e99c" gracePeriod=2 Jan 29 12:23:26 crc kubenswrapper[4753]: I0129 12:23:26.831315 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/rabbitmq-cluster-operator-index-w4plk" Jan 29 12:23:26 crc kubenswrapper[4753]: I0129 12:23:26.831622 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/rabbitmq-cluster-operator-index-w4plk" Jan 29 12:23:26 crc kubenswrapper[4753]: I0129 12:23:26.863475 4753 generic.go:334] "Generic (PLEG): container finished" podID="0270e702-8bd1-48e1-9356-e59def1ddd0a" containerID="dae613d6299584dfa399fae9791b24fd98b1a763ce95ba9a3efabe6645b5e99c" exitCode=0 Jan 29 12:23:26 crc kubenswrapper[4753]: I0129 12:23:26.863523 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r9bb2" event={"ID":"0270e702-8bd1-48e1-9356-e59def1ddd0a","Type":"ContainerDied","Data":"dae613d6299584dfa399fae9791b24fd98b1a763ce95ba9a3efabe6645b5e99c"} Jan 29 12:23:26 crc kubenswrapper[4753]: I0129 12:23:26.867216 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/root-account-create-update-ts8d4"] Jan 29 12:23:26 crc kubenswrapper[4753]: E0129 12:23:26.868670 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f61be96f-9f6c-42c3-a177-afa775bdee56" containerName="extract-content" Jan 29 12:23:26 crc kubenswrapper[4753]: I0129 12:23:26.868712 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="f61be96f-9f6c-42c3-a177-afa775bdee56" containerName="extract-content" Jan 29 12:23:26 crc kubenswrapper[4753]: E0129 12:23:26.868728 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f61be96f-9f6c-42c3-a177-afa775bdee56" containerName="registry-server" Jan 29 12:23:26 crc kubenswrapper[4753]: I0129 12:23:26.868736 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="f61be96f-9f6c-42c3-a177-afa775bdee56" containerName="registry-server" Jan 29 12:23:26 crc kubenswrapper[4753]: E0129 12:23:26.868768 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f61be96f-9f6c-42c3-a177-afa775bdee56" containerName="extract-utilities" Jan 29 12:23:26 crc kubenswrapper[4753]: I0129 12:23:26.868778 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="f61be96f-9f6c-42c3-a177-afa775bdee56" containerName="extract-utilities" Jan 29 12:23:26 crc kubenswrapper[4753]: I0129 12:23:26.868965 4753 memory_manager.go:354] "RemoveStaleState 
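The burst of RemoveStaleState / "Deleted CPUSet assignment" entries above is housekeeping: once pod f61be96f-9f6c-42c3-a177-afa775bdee56 is gone, the CPU and memory managers drop any per-container assignments still recorded against its UID before admitting the next pod. A sketch of that pruning (the state shape here is an assumption, not the kubelet's real checkpoint format):

```go
package main

import "fmt"

// removeStaleState drops recorded per-container assignments for any pod
// UID that is no longer in the live set.
func removeStaleState(assignments map[string]map[string]string, livePods map[string]bool) {
	for podUID, containers := range assignments {
		if livePods[podUID] {
			continue
		}
		for name := range containers {
			fmt.Printf("RemoveStaleState: removing container podUID=%q containerName=%q\n", podUID, name)
		}
		delete(assignments, podUID) // deleting during range is safe in Go
	}
}

func main() {
	state := map[string]map[string]string{
		"f61be96f-9f6c-42c3-a177-afa775bdee56": {
			"extract-content":   "cpuset",
			"registry-server":   "cpuset",
			"extract-utilities": "cpuset",
		},
	}
	removeStaleState(state, map[string]bool{}) // no live pods: everything pruned
}
```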
removing state" podUID="f61be96f-9f6c-42c3-a177-afa775bdee56" containerName="registry-server" Jan 29 12:23:26 crc kubenswrapper[4753]: I0129 12:23:26.869639 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/root-account-create-update-ts8d4" Jan 29 12:23:26 crc kubenswrapper[4753]: I0129 12:23:26.876387 4753 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"openstack-mariadb-root-db-secret" Jan 29 12:23:26 crc kubenswrapper[4753]: I0129 12:23:26.881023 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/root-account-create-update-ts8d4"] Jan 29 12:23:26 crc kubenswrapper[4753]: I0129 12:23:26.887675 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/rabbitmq-cluster-operator-index-w4plk" Jan 29 12:23:26 crc kubenswrapper[4753]: I0129 12:23:26.918962 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/rabbitmq-cluster-operator-index-w4plk" Jan 29 12:23:26 crc kubenswrapper[4753]: I0129 12:23:26.932711 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hq2gs\" (UniqueName: \"kubernetes.io/projected/62c6dedf-3f69-486b-9dac-2fcef82d1571-kube-api-access-hq2gs\") pod \"root-account-create-update-ts8d4\" (UID: \"62c6dedf-3f69-486b-9dac-2fcef82d1571\") " pod="swift-kuttl-tests/root-account-create-update-ts8d4" Jan 29 12:23:26 crc kubenswrapper[4753]: I0129 12:23:26.932858 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/62c6dedf-3f69-486b-9dac-2fcef82d1571-operator-scripts\") pod \"root-account-create-update-ts8d4\" (UID: \"62c6dedf-3f69-486b-9dac-2fcef82d1571\") " pod="swift-kuttl-tests/root-account-create-update-ts8d4" Jan 29 12:23:26 crc kubenswrapper[4753]: E0129 12:23:26.979963 4753 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.129.56.211:34032->38.129.56.211:43257: write tcp 38.129.56.211:34032->38.129.56.211:43257: write: broken pipe Jan 29 12:23:27 crc kubenswrapper[4753]: I0129 12:23:27.033573 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/62c6dedf-3f69-486b-9dac-2fcef82d1571-operator-scripts\") pod \"root-account-create-update-ts8d4\" (UID: \"62c6dedf-3f69-486b-9dac-2fcef82d1571\") " pod="swift-kuttl-tests/root-account-create-update-ts8d4" Jan 29 12:23:27 crc kubenswrapper[4753]: I0129 12:23:27.033904 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hq2gs\" (UniqueName: \"kubernetes.io/projected/62c6dedf-3f69-486b-9dac-2fcef82d1571-kube-api-access-hq2gs\") pod \"root-account-create-update-ts8d4\" (UID: \"62c6dedf-3f69-486b-9dac-2fcef82d1571\") " pod="swift-kuttl-tests/root-account-create-update-ts8d4" Jan 29 12:23:27 crc kubenswrapper[4753]: I0129 12:23:27.034798 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/62c6dedf-3f69-486b-9dac-2fcef82d1571-operator-scripts\") pod \"root-account-create-update-ts8d4\" (UID: \"62c6dedf-3f69-486b-9dac-2fcef82d1571\") " pod="swift-kuttl-tests/root-account-create-update-ts8d4" Jan 29 12:23:27 crc kubenswrapper[4753]: I0129 12:23:27.069942 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-hq2gs\" (UniqueName: \"kubernetes.io/projected/62c6dedf-3f69-486b-9dac-2fcef82d1571-kube-api-access-hq2gs\") pod \"root-account-create-update-ts8d4\" (UID: \"62c6dedf-3f69-486b-9dac-2fcef82d1571\") " pod="swift-kuttl-tests/root-account-create-update-ts8d4" Jan 29 12:23:27 crc kubenswrapper[4753]: I0129 12:23:27.195853 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/root-account-create-update-ts8d4" Jan 29 12:23:27 crc kubenswrapper[4753]: I0129 12:23:27.484726 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-r9bb2" Jan 29 12:23:27 crc kubenswrapper[4753]: I0129 12:23:27.509030 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wnm7d\" (UniqueName: \"kubernetes.io/projected/0270e702-8bd1-48e1-9356-e59def1ddd0a-kube-api-access-wnm7d\") pod \"0270e702-8bd1-48e1-9356-e59def1ddd0a\" (UID: \"0270e702-8bd1-48e1-9356-e59def1ddd0a\") " Jan 29 12:23:27 crc kubenswrapper[4753]: I0129 12:23:27.509069 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0270e702-8bd1-48e1-9356-e59def1ddd0a-catalog-content\") pod \"0270e702-8bd1-48e1-9356-e59def1ddd0a\" (UID: \"0270e702-8bd1-48e1-9356-e59def1ddd0a\") " Jan 29 12:23:27 crc kubenswrapper[4753]: I0129 12:23:27.509129 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0270e702-8bd1-48e1-9356-e59def1ddd0a-utilities\") pod \"0270e702-8bd1-48e1-9356-e59def1ddd0a\" (UID: \"0270e702-8bd1-48e1-9356-e59def1ddd0a\") " Jan 29 12:23:27 crc kubenswrapper[4753]: I0129 12:23:27.510427 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0270e702-8bd1-48e1-9356-e59def1ddd0a-utilities" (OuterVolumeSpecName: "utilities") pod "0270e702-8bd1-48e1-9356-e59def1ddd0a" (UID: "0270e702-8bd1-48e1-9356-e59def1ddd0a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:23:27 crc kubenswrapper[4753]: I0129 12:23:27.523505 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0270e702-8bd1-48e1-9356-e59def1ddd0a-kube-api-access-wnm7d" (OuterVolumeSpecName: "kube-api-access-wnm7d") pod "0270e702-8bd1-48e1-9356-e59def1ddd0a" (UID: "0270e702-8bd1-48e1-9356-e59def1ddd0a"). InnerVolumeSpecName "kube-api-access-wnm7d". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:23:27 crc kubenswrapper[4753]: I0129 12:23:27.571247 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0270e702-8bd1-48e1-9356-e59def1ddd0a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0270e702-8bd1-48e1-9356-e59def1ddd0a" (UID: "0270e702-8bd1-48e1-9356-e59def1ddd0a"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:23:27 crc kubenswrapper[4753]: I0129 12:23:27.610545 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wnm7d\" (UniqueName: \"kubernetes.io/projected/0270e702-8bd1-48e1-9356-e59def1ddd0a-kube-api-access-wnm7d\") on node \"crc\" DevicePath \"\"" Jan 29 12:23:27 crc kubenswrapper[4753]: I0129 12:23:27.610583 4753 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0270e702-8bd1-48e1-9356-e59def1ddd0a-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 12:23:27 crc kubenswrapper[4753]: I0129 12:23:27.610594 4753 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0270e702-8bd1-48e1-9356-e59def1ddd0a-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 12:23:27 crc kubenswrapper[4753]: I0129 12:23:27.782378 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/root-account-create-update-ts8d4"] Jan 29 12:23:27 crc kubenswrapper[4753]: I0129 12:23:27.872058 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/root-account-create-update-ts8d4" event={"ID":"62c6dedf-3f69-486b-9dac-2fcef82d1571","Type":"ContainerStarted","Data":"e69a5da43dd90d9d736e2d6daf8032b78111d3dc2240d90b7c964da9976fa93d"} Jan 29 12:23:27 crc kubenswrapper[4753]: I0129 12:23:27.874986 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r9bb2" event={"ID":"0270e702-8bd1-48e1-9356-e59def1ddd0a","Type":"ContainerDied","Data":"f0adb5dd82c505c6e0b8203ac0e35584573c351c2ea3cfbcd2a7eb10050ab469"} Jan 29 12:23:27 crc kubenswrapper[4753]: I0129 12:23:27.875064 4753 scope.go:117] "RemoveContainer" containerID="dae613d6299584dfa399fae9791b24fd98b1a763ce95ba9a3efabe6645b5e99c" Jan 29 12:23:27 crc kubenswrapper[4753]: I0129 12:23:27.875012 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-r9bb2" Jan 29 12:23:27 crc kubenswrapper[4753]: I0129 12:23:27.894435 4753 scope.go:117] "RemoveContainer" containerID="0a999ce8113967ea24103ad7952557242de710a19d3f65218b45f28152d294d0" Jan 29 12:23:27 crc kubenswrapper[4753]: I0129 12:23:27.941664 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-r9bb2"] Jan 29 12:23:27 crc kubenswrapper[4753]: I0129 12:23:27.941702 4753 scope.go:117] "RemoveContainer" containerID="5ff164143eec13d0d475cbdf3c961d5023d182e1d5a822c99468503cac048192" Jan 29 12:23:27 crc kubenswrapper[4753]: I0129 12:23:27.951521 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-r9bb2"] Jan 29 12:23:28 crc kubenswrapper[4753]: I0129 12:23:28.523100 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/openstack-galera-2" podUID="cc8c9706-0165-4cc6-ad22-bc70f03d5bf9" containerName="galera" probeResult="failure" output=< Jan 29 12:23:28 crc kubenswrapper[4753]: wsrep_local_state_comment (Donor/Desynced) differs from Synced Jan 29 12:23:28 crc kubenswrapper[4753]: > Jan 29 12:23:28 crc kubenswrapper[4753]: I0129 12:23:28.884911 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/root-account-create-update-ts8d4" event={"ID":"62c6dedf-3f69-486b-9dac-2fcef82d1571","Type":"ContainerStarted","Data":"b3818d112e3c7af7b22d4dc943951ccfb82acd520a7a99de288b20ecf4303c44"} Jan 29 12:23:28 crc kubenswrapper[4753]: I0129 12:23:28.922356 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="swift-kuttl-tests/root-account-create-update-ts8d4" podStartSLOduration=2.9223277469999998 podStartE2EDuration="2.922327747s" podCreationTimestamp="2026-01-29 12:23:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:23:28.910927576 +0000 UTC m=+1023.163009041" watchObservedRunningTime="2026-01-29 12:23:28.922327747 +0000 UTC m=+1023.174409202" Jan 29 12:23:29 crc kubenswrapper[4753]: I0129 12:23:29.252920 4753 patch_prober.go:28] interesting pod/machine-config-daemon-7c24x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 12:23:29 crc kubenswrapper[4753]: I0129 12:23:29.252991 4753 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 12:23:29 crc kubenswrapper[4753]: I0129 12:23:29.976277 4753 generic.go:334] "Generic (PLEG): container finished" podID="62c6dedf-3f69-486b-9dac-2fcef82d1571" containerID="b3818d112e3c7af7b22d4dc943951ccfb82acd520a7a99de288b20ecf4303c44" exitCode=0 Jan 29 12:23:30 crc kubenswrapper[4753]: I0129 12:23:29.998672 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0270e702-8bd1-48e1-9356-e59def1ddd0a" path="/var/lib/kubelet/pods/0270e702-8bd1-48e1-9356-e59def1ddd0a/volumes" Jan 29 12:23:30 crc kubenswrapper[4753]: I0129 12:23:29.999384 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/root-account-create-update-ts8d4" 
event={"ID":"62c6dedf-3f69-486b-9dac-2fcef82d1571","Type":"ContainerDied","Data":"b3818d112e3c7af7b22d4dc943951ccfb82acd520a7a99de288b20ecf4303c44"} Jan 29 12:23:31 crc kubenswrapper[4753]: I0129 12:23:31.263744 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/root-account-create-update-ts8d4" Jan 29 12:23:31 crc kubenswrapper[4753]: I0129 12:23:31.374480 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/62c6dedf-3f69-486b-9dac-2fcef82d1571-operator-scripts\") pod \"62c6dedf-3f69-486b-9dac-2fcef82d1571\" (UID: \"62c6dedf-3f69-486b-9dac-2fcef82d1571\") " Jan 29 12:23:31 crc kubenswrapper[4753]: I0129 12:23:31.374606 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hq2gs\" (UniqueName: \"kubernetes.io/projected/62c6dedf-3f69-486b-9dac-2fcef82d1571-kube-api-access-hq2gs\") pod \"62c6dedf-3f69-486b-9dac-2fcef82d1571\" (UID: \"62c6dedf-3f69-486b-9dac-2fcef82d1571\") " Jan 29 12:23:31 crc kubenswrapper[4753]: I0129 12:23:31.375925 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/62c6dedf-3f69-486b-9dac-2fcef82d1571-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "62c6dedf-3f69-486b-9dac-2fcef82d1571" (UID: "62c6dedf-3f69-486b-9dac-2fcef82d1571"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:23:31 crc kubenswrapper[4753]: I0129 12:23:31.376576 4753 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/62c6dedf-3f69-486b-9dac-2fcef82d1571-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:23:31 crc kubenswrapper[4753]: I0129 12:23:31.381706 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/62c6dedf-3f69-486b-9dac-2fcef82d1571-kube-api-access-hq2gs" (OuterVolumeSpecName: "kube-api-access-hq2gs") pod "62c6dedf-3f69-486b-9dac-2fcef82d1571" (UID: "62c6dedf-3f69-486b-9dac-2fcef82d1571"). InnerVolumeSpecName "kube-api-access-hq2gs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:23:31 crc kubenswrapper[4753]: I0129 12:23:31.477989 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hq2gs\" (UniqueName: \"kubernetes.io/projected/62c6dedf-3f69-486b-9dac-2fcef82d1571-kube-api-access-hq2gs\") on node \"crc\" DevicePath \"\"" Jan 29 12:23:31 crc kubenswrapper[4753]: I0129 12:23:31.990552 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/root-account-create-update-ts8d4" event={"ID":"62c6dedf-3f69-486b-9dac-2fcef82d1571","Type":"ContainerDied","Data":"e69a5da43dd90d9d736e2d6daf8032b78111d3dc2240d90b7c964da9976fa93d"} Jan 29 12:23:31 crc kubenswrapper[4753]: I0129 12:23:31.990602 4753 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e69a5da43dd90d9d736e2d6daf8032b78111d3dc2240d90b7c964da9976fa93d" Jan 29 12:23:31 crc kubenswrapper[4753]: I0129 12:23:31.990663 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/root-account-create-update-ts8d4" Jan 29 12:23:33 crc kubenswrapper[4753]: I0129 12:23:33.188365 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="swift-kuttl-tests/openstack-galera-0" Jan 29 12:23:33 crc kubenswrapper[4753]: I0129 12:23:33.255836 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="swift-kuttl-tests/openstack-galera-0" Jan 29 12:23:36 crc kubenswrapper[4753]: I0129 12:23:36.936348 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="swift-kuttl-tests/openstack-galera-1" Jan 29 12:23:37 crc kubenswrapper[4753]: I0129 12:23:37.005962 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="swift-kuttl-tests/openstack-galera-1" Jan 29 12:23:42 crc kubenswrapper[4753]: I0129 12:23:42.786495 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590mk8wv"] Jan 29 12:23:42 crc kubenswrapper[4753]: E0129 12:23:42.788583 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0270e702-8bd1-48e1-9356-e59def1ddd0a" containerName="extract-content" Jan 29 12:23:42 crc kubenswrapper[4753]: I0129 12:23:42.788720 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="0270e702-8bd1-48e1-9356-e59def1ddd0a" containerName="extract-content" Jan 29 12:23:42 crc kubenswrapper[4753]: E0129 12:23:42.788841 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62c6dedf-3f69-486b-9dac-2fcef82d1571" containerName="mariadb-account-create-update" Jan 29 12:23:42 crc kubenswrapper[4753]: I0129 12:23:42.788948 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="62c6dedf-3f69-486b-9dac-2fcef82d1571" containerName="mariadb-account-create-update" Jan 29 12:23:42 crc kubenswrapper[4753]: E0129 12:23:42.789085 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0270e702-8bd1-48e1-9356-e59def1ddd0a" containerName="registry-server" Jan 29 12:23:42 crc kubenswrapper[4753]: I0129 12:23:42.789189 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="0270e702-8bd1-48e1-9356-e59def1ddd0a" containerName="registry-server" Jan 29 12:23:42 crc kubenswrapper[4753]: E0129 12:23:42.789324 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0270e702-8bd1-48e1-9356-e59def1ddd0a" containerName="extract-utilities" Jan 29 12:23:42 crc kubenswrapper[4753]: I0129 12:23:42.789416 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="0270e702-8bd1-48e1-9356-e59def1ddd0a" containerName="extract-utilities" Jan 29 12:23:42 crc kubenswrapper[4753]: I0129 12:23:42.789714 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="0270e702-8bd1-48e1-9356-e59def1ddd0a" containerName="registry-server" Jan 29 12:23:42 crc kubenswrapper[4753]: I0129 12:23:42.789844 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="62c6dedf-3f69-486b-9dac-2fcef82d1571" containerName="mariadb-account-create-update" Jan 29 12:23:42 crc kubenswrapper[4753]: I0129 12:23:42.791219 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590mk8wv" Jan 29 12:23:42 crc kubenswrapper[4753]: I0129 12:23:42.793518 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590mk8wv"] Jan 29 12:23:42 crc kubenswrapper[4753]: I0129 12:23:42.794130 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-j2jmz" Jan 29 12:23:42 crc kubenswrapper[4753]: I0129 12:23:42.871451 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6ae1b286-c72f-4430-8504-1144b7265ebb-bundle\") pod \"9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590mk8wv\" (UID: \"6ae1b286-c72f-4430-8504-1144b7265ebb\") " pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590mk8wv" Jan 29 12:23:42 crc kubenswrapper[4753]: I0129 12:23:42.871516 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-78bgb\" (UniqueName: \"kubernetes.io/projected/6ae1b286-c72f-4430-8504-1144b7265ebb-kube-api-access-78bgb\") pod \"9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590mk8wv\" (UID: \"6ae1b286-c72f-4430-8504-1144b7265ebb\") " pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590mk8wv" Jan 29 12:23:42 crc kubenswrapper[4753]: I0129 12:23:42.871546 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6ae1b286-c72f-4430-8504-1144b7265ebb-util\") pod \"9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590mk8wv\" (UID: \"6ae1b286-c72f-4430-8504-1144b7265ebb\") " pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590mk8wv" Jan 29 12:23:42 crc kubenswrapper[4753]: I0129 12:23:42.972526 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6ae1b286-c72f-4430-8504-1144b7265ebb-bundle\") pod \"9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590mk8wv\" (UID: \"6ae1b286-c72f-4430-8504-1144b7265ebb\") " pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590mk8wv" Jan 29 12:23:42 crc kubenswrapper[4753]: I0129 12:23:42.972613 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-78bgb\" (UniqueName: \"kubernetes.io/projected/6ae1b286-c72f-4430-8504-1144b7265ebb-kube-api-access-78bgb\") pod \"9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590mk8wv\" (UID: \"6ae1b286-c72f-4430-8504-1144b7265ebb\") " pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590mk8wv" Jan 29 12:23:42 crc kubenswrapper[4753]: I0129 12:23:42.972673 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6ae1b286-c72f-4430-8504-1144b7265ebb-util\") pod \"9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590mk8wv\" (UID: \"6ae1b286-c72f-4430-8504-1144b7265ebb\") " pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590mk8wv" Jan 29 12:23:42 crc kubenswrapper[4753]: I0129 12:23:42.973748 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/6ae1b286-c72f-4430-8504-1144b7265ebb-bundle\") pod \"9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590mk8wv\" (UID: \"6ae1b286-c72f-4430-8504-1144b7265ebb\") " pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590mk8wv" Jan 29 12:23:42 crc kubenswrapper[4753]: I0129 12:23:42.973999 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6ae1b286-c72f-4430-8504-1144b7265ebb-util\") pod \"9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590mk8wv\" (UID: \"6ae1b286-c72f-4430-8504-1144b7265ebb\") " pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590mk8wv" Jan 29 12:23:42 crc kubenswrapper[4753]: I0129 12:23:42.994939 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-78bgb\" (UniqueName: \"kubernetes.io/projected/6ae1b286-c72f-4430-8504-1144b7265ebb-kube-api-access-78bgb\") pod \"9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590mk8wv\" (UID: \"6ae1b286-c72f-4430-8504-1144b7265ebb\") " pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590mk8wv" Jan 29 12:23:43 crc kubenswrapper[4753]: I0129 12:23:43.124817 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590mk8wv" Jan 29 12:23:43 crc kubenswrapper[4753]: I0129 12:23:43.566814 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590mk8wv"] Jan 29 12:23:44 crc kubenswrapper[4753]: I0129 12:23:44.088941 4753 generic.go:334] "Generic (PLEG): container finished" podID="6ae1b286-c72f-4430-8504-1144b7265ebb" containerID="62733507a6a0151e3bae922b7f73904e54600ead2093a140da3d91e0754f0092" exitCode=0 Jan 29 12:23:44 crc kubenswrapper[4753]: I0129 12:23:44.088996 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590mk8wv" event={"ID":"6ae1b286-c72f-4430-8504-1144b7265ebb","Type":"ContainerDied","Data":"62733507a6a0151e3bae922b7f73904e54600ead2093a140da3d91e0754f0092"} Jan 29 12:23:44 crc kubenswrapper[4753]: I0129 12:23:44.089051 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590mk8wv" event={"ID":"6ae1b286-c72f-4430-8504-1144b7265ebb","Type":"ContainerStarted","Data":"07e66a2cda431d5fe15f42ed7fcae7290780b8d7e443786501df5cbdf8493385"} Jan 29 12:23:45 crc kubenswrapper[4753]: I0129 12:23:45.099238 4753 generic.go:334] "Generic (PLEG): container finished" podID="6ae1b286-c72f-4430-8504-1144b7265ebb" containerID="c8477d3a3cf990369d0f904142726f4c467abf348262470a40ead8e3bb718e21" exitCode=0 Jan 29 12:23:45 crc kubenswrapper[4753]: I0129 12:23:45.099288 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590mk8wv" event={"ID":"6ae1b286-c72f-4430-8504-1144b7265ebb","Type":"ContainerDied","Data":"c8477d3a3cf990369d0f904142726f4c467abf348262470a40ead8e3bb718e21"} Jan 29 12:23:46 crc kubenswrapper[4753]: I0129 12:23:46.115351 4753 generic.go:334] "Generic (PLEG): container finished" podID="6ae1b286-c72f-4430-8504-1144b7265ebb" containerID="53b7eb75d85acb11ab4df368734c49a9684c81baf557ab0eaa764632ccc57550" exitCode=0 Jan 29 12:23:46 crc kubenswrapper[4753]: I0129 12:23:46.115424 4753 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590mk8wv" event={"ID":"6ae1b286-c72f-4430-8504-1144b7265ebb","Type":"ContainerDied","Data":"53b7eb75d85acb11ab4df368734c49a9684c81baf557ab0eaa764632ccc57550"} Jan 29 12:23:47 crc kubenswrapper[4753]: I0129 12:23:47.413680 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590mk8wv" Jan 29 12:23:47 crc kubenswrapper[4753]: I0129 12:23:47.609489 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6ae1b286-c72f-4430-8504-1144b7265ebb-bundle\") pod \"6ae1b286-c72f-4430-8504-1144b7265ebb\" (UID: \"6ae1b286-c72f-4430-8504-1144b7265ebb\") " Jan 29 12:23:47 crc kubenswrapper[4753]: I0129 12:23:47.609632 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-78bgb\" (UniqueName: \"kubernetes.io/projected/6ae1b286-c72f-4430-8504-1144b7265ebb-kube-api-access-78bgb\") pod \"6ae1b286-c72f-4430-8504-1144b7265ebb\" (UID: \"6ae1b286-c72f-4430-8504-1144b7265ebb\") " Jan 29 12:23:47 crc kubenswrapper[4753]: I0129 12:23:47.609733 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6ae1b286-c72f-4430-8504-1144b7265ebb-util\") pod \"6ae1b286-c72f-4430-8504-1144b7265ebb\" (UID: \"6ae1b286-c72f-4430-8504-1144b7265ebb\") " Jan 29 12:23:47 crc kubenswrapper[4753]: I0129 12:23:47.610456 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6ae1b286-c72f-4430-8504-1144b7265ebb-bundle" (OuterVolumeSpecName: "bundle") pod "6ae1b286-c72f-4430-8504-1144b7265ebb" (UID: "6ae1b286-c72f-4430-8504-1144b7265ebb"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:23:47 crc kubenswrapper[4753]: I0129 12:23:47.615729 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ae1b286-c72f-4430-8504-1144b7265ebb-kube-api-access-78bgb" (OuterVolumeSpecName: "kube-api-access-78bgb") pod "6ae1b286-c72f-4430-8504-1144b7265ebb" (UID: "6ae1b286-c72f-4430-8504-1144b7265ebb"). InnerVolumeSpecName "kube-api-access-78bgb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:23:47 crc kubenswrapper[4753]: I0129 12:23:47.631844 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6ae1b286-c72f-4430-8504-1144b7265ebb-util" (OuterVolumeSpecName: "util") pod "6ae1b286-c72f-4430-8504-1144b7265ebb" (UID: "6ae1b286-c72f-4430-8504-1144b7265ebb"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:23:47 crc kubenswrapper[4753]: I0129 12:23:47.711665 4753 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6ae1b286-c72f-4430-8504-1144b7265ebb-util\") on node \"crc\" DevicePath \"\"" Jan 29 12:23:47 crc kubenswrapper[4753]: I0129 12:23:47.712082 4753 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6ae1b286-c72f-4430-8504-1144b7265ebb-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:23:47 crc kubenswrapper[4753]: I0129 12:23:47.712113 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-78bgb\" (UniqueName: \"kubernetes.io/projected/6ae1b286-c72f-4430-8504-1144b7265ebb-kube-api-access-78bgb\") on node \"crc\" DevicePath \"\"" Jan 29 12:23:48 crc kubenswrapper[4753]: I0129 12:23:48.129617 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590mk8wv" event={"ID":"6ae1b286-c72f-4430-8504-1144b7265ebb","Type":"ContainerDied","Data":"07e66a2cda431d5fe15f42ed7fcae7290780b8d7e443786501df5cbdf8493385"} Jan 29 12:23:48 crc kubenswrapper[4753]: I0129 12:23:48.129988 4753 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="07e66a2cda431d5fe15f42ed7fcae7290780b8d7e443786501df5cbdf8493385" Jan 29 12:23:48 crc kubenswrapper[4753]: I0129 12:23:48.129689 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590mk8wv" Jan 29 12:23:56 crc kubenswrapper[4753]: I0129 12:23:56.924259 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-779fc9694b-gcg2x"] Jan 29 12:23:56 crc kubenswrapper[4753]: E0129 12:23:56.926109 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ae1b286-c72f-4430-8504-1144b7265ebb" containerName="extract" Jan 29 12:23:56 crc kubenswrapper[4753]: I0129 12:23:56.926217 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ae1b286-c72f-4430-8504-1144b7265ebb" containerName="extract" Jan 29 12:23:56 crc kubenswrapper[4753]: E0129 12:23:56.926311 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ae1b286-c72f-4430-8504-1144b7265ebb" containerName="util" Jan 29 12:23:56 crc kubenswrapper[4753]: I0129 12:23:56.926381 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ae1b286-c72f-4430-8504-1144b7265ebb" containerName="util" Jan 29 12:23:56 crc kubenswrapper[4753]: E0129 12:23:56.926453 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ae1b286-c72f-4430-8504-1144b7265ebb" containerName="pull" Jan 29 12:23:56 crc kubenswrapper[4753]: I0129 12:23:56.926521 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ae1b286-c72f-4430-8504-1144b7265ebb" containerName="pull" Jan 29 12:23:56 crc kubenswrapper[4753]: I0129 12:23:56.926720 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="6ae1b286-c72f-4430-8504-1144b7265ebb" containerName="extract" Jan 29 12:23:56 crc kubenswrapper[4753]: I0129 12:23:56.927369 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-gcg2x" Jan 29 12:23:56 crc kubenswrapper[4753]: I0129 12:23:56.932005 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-dockercfg-xdcv8" Jan 29 12:23:56 crc kubenswrapper[4753]: I0129 12:23:56.953407 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-779fc9694b-gcg2x"] Jan 29 12:23:57 crc kubenswrapper[4753]: I0129 12:23:57.075852 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4w4ml\" (UniqueName: \"kubernetes.io/projected/e4d9ede8-e63a-4ba7-b3e4-057ff1298b91-kube-api-access-4w4ml\") pod \"rabbitmq-cluster-operator-779fc9694b-gcg2x\" (UID: \"e4d9ede8-e63a-4ba7-b3e4-057ff1298b91\") " pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-gcg2x" Jan 29 12:23:57 crc kubenswrapper[4753]: I0129 12:23:57.177797 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4w4ml\" (UniqueName: \"kubernetes.io/projected/e4d9ede8-e63a-4ba7-b3e4-057ff1298b91-kube-api-access-4w4ml\") pod \"rabbitmq-cluster-operator-779fc9694b-gcg2x\" (UID: \"e4d9ede8-e63a-4ba7-b3e4-057ff1298b91\") " pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-gcg2x" Jan 29 12:23:57 crc kubenswrapper[4753]: I0129 12:23:57.207451 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4w4ml\" (UniqueName: \"kubernetes.io/projected/e4d9ede8-e63a-4ba7-b3e4-057ff1298b91-kube-api-access-4w4ml\") pod \"rabbitmq-cluster-operator-779fc9694b-gcg2x\" (UID: \"e4d9ede8-e63a-4ba7-b3e4-057ff1298b91\") " pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-gcg2x" Jan 29 12:23:57 crc kubenswrapper[4753]: I0129 12:23:57.252563 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-gcg2x" Jan 29 12:23:57 crc kubenswrapper[4753]: I0129 12:23:57.686186 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-779fc9694b-gcg2x"] Jan 29 12:23:58 crc kubenswrapper[4753]: I0129 12:23:58.528985 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-gcg2x" event={"ID":"e4d9ede8-e63a-4ba7-b3e4-057ff1298b91","Type":"ContainerStarted","Data":"82ad557535f79864230d778ed2fdf7777d0cbdb8892d260c4860512cf07faaa8"} Jan 29 12:23:59 crc kubenswrapper[4753]: I0129 12:23:59.252599 4753 patch_prober.go:28] interesting pod/machine-config-daemon-7c24x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 12:23:59 crc kubenswrapper[4753]: I0129 12:23:59.252684 4753 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 12:23:59 crc kubenswrapper[4753]: I0129 12:23:59.252741 4753 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" Jan 29 12:23:59 crc kubenswrapper[4753]: I0129 12:23:59.253435 4753 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a062eb50ea7e6f751f3ad76c7903eb2507edfb5f26af3ebaf7ccc8d61960baaf"} pod="openshift-machine-config-operator/machine-config-daemon-7c24x" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 12:23:59 crc kubenswrapper[4753]: I0129 12:23:59.253506 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" containerName="machine-config-daemon" containerID="cri-o://a062eb50ea7e6f751f3ad76c7903eb2507edfb5f26af3ebaf7ccc8d61960baaf" gracePeriod=600 Jan 29 12:23:59 crc kubenswrapper[4753]: I0129 12:23:59.541088 4753 generic.go:334] "Generic (PLEG): container finished" podID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" containerID="a062eb50ea7e6f751f3ad76c7903eb2507edfb5f26af3ebaf7ccc8d61960baaf" exitCode=0 Jan 29 12:23:59 crc kubenswrapper[4753]: I0129 12:23:59.541270 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" event={"ID":"b0310995-a7c7-47c3-ae6c-05daaaba92a6","Type":"ContainerDied","Data":"a062eb50ea7e6f751f3ad76c7903eb2507edfb5f26af3ebaf7ccc8d61960baaf"} Jan 29 12:23:59 crc kubenswrapper[4753]: I0129 12:23:59.541440 4753 scope.go:117] "RemoveContainer" containerID="4b8aed6ea15733b89649d8b36ec89a9ec9f88b46186a18e1942be3af55f96320" Jan 29 12:24:00 crc kubenswrapper[4753]: I0129 12:24:00.551706 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" event={"ID":"b0310995-a7c7-47c3-ae6c-05daaaba92a6","Type":"ContainerStarted","Data":"5697d146fc311e04cb43bd311c6234230b3e6c0088cc915fea2e2ab2972df9e8"} Jan 29 12:24:03 crc kubenswrapper[4753]: 
I0129 12:24:03.656674 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-gcg2x" event={"ID":"e4d9ede8-e63a-4ba7-b3e4-057ff1298b91","Type":"ContainerStarted","Data":"6112e805c4156eb8689cf1090f692bcddcfdeef5645b3c1e402118d65b56bfa3"} Jan 29 12:24:03 crc kubenswrapper[4753]: I0129 12:24:03.671600 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-gcg2x" podStartSLOduration=2.851069153 podStartE2EDuration="7.671568664s" podCreationTimestamp="2026-01-29 12:23:56 +0000 UTC" firstStartedPulling="2026-01-29 12:23:57.694412632 +0000 UTC m=+1051.946494087" lastFinishedPulling="2026-01-29 12:24:02.514912143 +0000 UTC m=+1056.766993598" observedRunningTime="2026-01-29 12:24:03.670672849 +0000 UTC m=+1057.922754304" watchObservedRunningTime="2026-01-29 12:24:03.671568664 +0000 UTC m=+1057.923650119" Jan 29 12:24:09 crc kubenswrapper[4753]: I0129 12:24:09.193962 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/rabbitmq-server-0"] Jan 29 12:24:09 crc kubenswrapper[4753]: I0129 12:24:09.195375 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/rabbitmq-server-0" Jan 29 12:24:09 crc kubenswrapper[4753]: I0129 12:24:09.200736 4753 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"rabbitmq-erlang-cookie" Jan 29 12:24:09 crc kubenswrapper[4753]: I0129 12:24:09.201883 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"swift-kuttl-tests"/"rabbitmq-plugins-conf" Jan 29 12:24:09 crc kubenswrapper[4753]: I0129 12:24:09.202050 4753 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"rabbitmq-server-dockercfg-2js9v" Jan 29 12:24:09 crc kubenswrapper[4753]: I0129 12:24:09.202205 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"swift-kuttl-tests"/"rabbitmq-server-conf" Jan 29 12:24:09 crc kubenswrapper[4753]: I0129 12:24:09.202470 4753 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"rabbitmq-default-user" Jan 29 12:24:09 crc kubenswrapper[4753]: I0129 12:24:09.206234 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/rabbitmq-server-0"] Jan 29 12:24:09 crc kubenswrapper[4753]: I0129 12:24:09.334808 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/e0033592-f961-4066-9026-3645b09f9524-pod-info\") pod \"rabbitmq-server-0\" (UID: \"e0033592-f961-4066-9026-3645b09f9524\") " pod="swift-kuttl-tests/rabbitmq-server-0" Jan 29 12:24:09 crc kubenswrapper[4753]: I0129 12:24:09.334893 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/e0033592-f961-4066-9026-3645b09f9524-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"e0033592-f961-4066-9026-3645b09f9524\") " pod="swift-kuttl-tests/rabbitmq-server-0" Jan 29 12:24:09 crc kubenswrapper[4753]: I0129 12:24:09.335012 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x9czh\" (UniqueName: \"kubernetes.io/projected/e0033592-f961-4066-9026-3645b09f9524-kube-api-access-x9czh\") pod \"rabbitmq-server-0\" (UID: \"e0033592-f961-4066-9026-3645b09f9524\") " pod="swift-kuttl-tests/rabbitmq-server-0" Jan 29 12:24:09 crc 
kubenswrapper[4753]: I0129 12:24:09.335047 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/e0033592-f961-4066-9026-3645b09f9524-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"e0033592-f961-4066-9026-3645b09f9524\") " pod="swift-kuttl-tests/rabbitmq-server-0" Jan 29 12:24:09 crc kubenswrapper[4753]: I0129 12:24:09.335093 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/e0033592-f961-4066-9026-3645b09f9524-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"e0033592-f961-4066-9026-3645b09f9524\") " pod="swift-kuttl-tests/rabbitmq-server-0" Jan 29 12:24:09 crc kubenswrapper[4753]: I0129 12:24:09.335212 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/e0033592-f961-4066-9026-3645b09f9524-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"e0033592-f961-4066-9026-3645b09f9524\") " pod="swift-kuttl-tests/rabbitmq-server-0" Jan 29 12:24:09 crc kubenswrapper[4753]: I0129 12:24:09.335301 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-85f927ae-4eda-4b12-9ccc-0a8b9a462f5d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-85f927ae-4eda-4b12-9ccc-0a8b9a462f5d\") pod \"rabbitmq-server-0\" (UID: \"e0033592-f961-4066-9026-3645b09f9524\") " pod="swift-kuttl-tests/rabbitmq-server-0" Jan 29 12:24:09 crc kubenswrapper[4753]: I0129 12:24:09.335337 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/e0033592-f961-4066-9026-3645b09f9524-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"e0033592-f961-4066-9026-3645b09f9524\") " pod="swift-kuttl-tests/rabbitmq-server-0" Jan 29 12:24:09 crc kubenswrapper[4753]: I0129 12:24:09.436320 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/e0033592-f961-4066-9026-3645b09f9524-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"e0033592-f961-4066-9026-3645b09f9524\") " pod="swift-kuttl-tests/rabbitmq-server-0" Jan 29 12:24:09 crc kubenswrapper[4753]: I0129 12:24:09.436427 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-85f927ae-4eda-4b12-9ccc-0a8b9a462f5d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-85f927ae-4eda-4b12-9ccc-0a8b9a462f5d\") pod \"rabbitmq-server-0\" (UID: \"e0033592-f961-4066-9026-3645b09f9524\") " pod="swift-kuttl-tests/rabbitmq-server-0" Jan 29 12:24:09 crc kubenswrapper[4753]: I0129 12:24:09.436488 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/e0033592-f961-4066-9026-3645b09f9524-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"e0033592-f961-4066-9026-3645b09f9524\") " pod="swift-kuttl-tests/rabbitmq-server-0" Jan 29 12:24:09 crc kubenswrapper[4753]: I0129 12:24:09.436523 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/e0033592-f961-4066-9026-3645b09f9524-pod-info\") pod \"rabbitmq-server-0\" (UID: \"e0033592-f961-4066-9026-3645b09f9524\") " 
pod="swift-kuttl-tests/rabbitmq-server-0" Jan 29 12:24:09 crc kubenswrapper[4753]: I0129 12:24:09.436761 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/e0033592-f961-4066-9026-3645b09f9524-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"e0033592-f961-4066-9026-3645b09f9524\") " pod="swift-kuttl-tests/rabbitmq-server-0" Jan 29 12:24:09 crc kubenswrapper[4753]: I0129 12:24:09.436799 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x9czh\" (UniqueName: \"kubernetes.io/projected/e0033592-f961-4066-9026-3645b09f9524-kube-api-access-x9czh\") pod \"rabbitmq-server-0\" (UID: \"e0033592-f961-4066-9026-3645b09f9524\") " pod="swift-kuttl-tests/rabbitmq-server-0" Jan 29 12:24:09 crc kubenswrapper[4753]: I0129 12:24:09.436824 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/e0033592-f961-4066-9026-3645b09f9524-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"e0033592-f961-4066-9026-3645b09f9524\") " pod="swift-kuttl-tests/rabbitmq-server-0" Jan 29 12:24:09 crc kubenswrapper[4753]: I0129 12:24:09.436867 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/e0033592-f961-4066-9026-3645b09f9524-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"e0033592-f961-4066-9026-3645b09f9524\") " pod="swift-kuttl-tests/rabbitmq-server-0" Jan 29 12:24:09 crc kubenswrapper[4753]: I0129 12:24:09.437155 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/e0033592-f961-4066-9026-3645b09f9524-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"e0033592-f961-4066-9026-3645b09f9524\") " pod="swift-kuttl-tests/rabbitmq-server-0" Jan 29 12:24:09 crc kubenswrapper[4753]: I0129 12:24:09.438038 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/e0033592-f961-4066-9026-3645b09f9524-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"e0033592-f961-4066-9026-3645b09f9524\") " pod="swift-kuttl-tests/rabbitmq-server-0" Jan 29 12:24:09 crc kubenswrapper[4753]: I0129 12:24:09.438255 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/e0033592-f961-4066-9026-3645b09f9524-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"e0033592-f961-4066-9026-3645b09f9524\") " pod="swift-kuttl-tests/rabbitmq-server-0" Jan 29 12:24:09 crc kubenswrapper[4753]: I0129 12:24:09.441751 4753 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 29 12:24:09 crc kubenswrapper[4753]: I0129 12:24:09.441800 4753 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-85f927ae-4eda-4b12-9ccc-0a8b9a462f5d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-85f927ae-4eda-4b12-9ccc-0a8b9a462f5d\") pod \"rabbitmq-server-0\" (UID: \"e0033592-f961-4066-9026-3645b09f9524\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/65db10ee6ccd71934571dbc92eb8feac75b50d0b4778a175053976a77595d581/globalmount\"" pod="swift-kuttl-tests/rabbitmq-server-0" Jan 29 12:24:09 crc kubenswrapper[4753]: I0129 12:24:09.444927 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/e0033592-f961-4066-9026-3645b09f9524-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"e0033592-f961-4066-9026-3645b09f9524\") " pod="swift-kuttl-tests/rabbitmq-server-0" Jan 29 12:24:09 crc kubenswrapper[4753]: I0129 12:24:09.445149 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/e0033592-f961-4066-9026-3645b09f9524-pod-info\") pod \"rabbitmq-server-0\" (UID: \"e0033592-f961-4066-9026-3645b09f9524\") " pod="swift-kuttl-tests/rabbitmq-server-0" Jan 29 12:24:09 crc kubenswrapper[4753]: I0129 12:24:09.450177 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/e0033592-f961-4066-9026-3645b09f9524-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"e0033592-f961-4066-9026-3645b09f9524\") " pod="swift-kuttl-tests/rabbitmq-server-0" Jan 29 12:24:09 crc kubenswrapper[4753]: I0129 12:24:09.455914 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x9czh\" (UniqueName: \"kubernetes.io/projected/e0033592-f961-4066-9026-3645b09f9524-kube-api-access-x9czh\") pod \"rabbitmq-server-0\" (UID: \"e0033592-f961-4066-9026-3645b09f9524\") " pod="swift-kuttl-tests/rabbitmq-server-0" Jan 29 12:24:09 crc kubenswrapper[4753]: I0129 12:24:09.472384 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-85f927ae-4eda-4b12-9ccc-0a8b9a462f5d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-85f927ae-4eda-4b12-9ccc-0a8b9a462f5d\") pod \"rabbitmq-server-0\" (UID: \"e0033592-f961-4066-9026-3645b09f9524\") " pod="swift-kuttl-tests/rabbitmq-server-0" Jan 29 12:24:09 crc kubenswrapper[4753]: I0129 12:24:09.518665 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/rabbitmq-server-0" Jan 29 12:24:09 crc kubenswrapper[4753]: I0129 12:24:09.996353 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/rabbitmq-server-0"] Jan 29 12:24:10 crc kubenswrapper[4753]: I0129 12:24:10.713995 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/rabbitmq-server-0" event={"ID":"e0033592-f961-4066-9026-3645b09f9524","Type":"ContainerStarted","Data":"bfd2c866a6ee1d1ec8c2ef5bf56c9a201fcfc285cd8fa6564a5e96db8054ee36"} Jan 29 12:24:10 crc kubenswrapper[4753]: I0129 12:24:10.928609 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-index-qzjgm"] Jan 29 12:24:10 crc kubenswrapper[4753]: I0129 12:24:10.929473 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-index-qzjgm" Jan 29 12:24:10 crc kubenswrapper[4753]: I0129 12:24:10.932084 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-index-dockercfg-k7lvh" Jan 29 12:24:11 crc kubenswrapper[4753]: I0129 12:24:11.150882 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dhr6v\" (UniqueName: \"kubernetes.io/projected/edf5d166-7a55-4ac0-ab98-ff0f503fd9a6-kube-api-access-dhr6v\") pod \"keystone-operator-index-qzjgm\" (UID: \"edf5d166-7a55-4ac0-ab98-ff0f503fd9a6\") " pod="openstack-operators/keystone-operator-index-qzjgm" Jan 29 12:24:11 crc kubenswrapper[4753]: I0129 12:24:11.161449 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-index-qzjgm"] Jan 29 12:24:11 crc kubenswrapper[4753]: I0129 12:24:11.252035 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dhr6v\" (UniqueName: \"kubernetes.io/projected/edf5d166-7a55-4ac0-ab98-ff0f503fd9a6-kube-api-access-dhr6v\") pod \"keystone-operator-index-qzjgm\" (UID: \"edf5d166-7a55-4ac0-ab98-ff0f503fd9a6\") " pod="openstack-operators/keystone-operator-index-qzjgm" Jan 29 12:24:11 crc kubenswrapper[4753]: I0129 12:24:11.270552 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dhr6v\" (UniqueName: \"kubernetes.io/projected/edf5d166-7a55-4ac0-ab98-ff0f503fd9a6-kube-api-access-dhr6v\") pod \"keystone-operator-index-qzjgm\" (UID: \"edf5d166-7a55-4ac0-ab98-ff0f503fd9a6\") " pod="openstack-operators/keystone-operator-index-qzjgm" Jan 29 12:24:11 crc kubenswrapper[4753]: I0129 12:24:11.485780 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-index-qzjgm" Jan 29 12:24:11 crc kubenswrapper[4753]: I0129 12:24:11.849642 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-index-qzjgm"] Jan 29 12:24:11 crc kubenswrapper[4753]: W0129 12:24:11.866470 4753 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podedf5d166_7a55_4ac0_ab98_ff0f503fd9a6.slice/crio-180619a27f994f9612d9ab9a5d139e97343c3b2d10beac31d69ce5ef9bb3d33a WatchSource:0}: Error finding container 180619a27f994f9612d9ab9a5d139e97343c3b2d10beac31d69ce5ef9bb3d33a: Status 404 returned error can't find the container with id 180619a27f994f9612d9ab9a5d139e97343c3b2d10beac31d69ce5ef9bb3d33a Jan 29 12:24:12 crc kubenswrapper[4753]: I0129 12:24:12.779185 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-index-qzjgm" event={"ID":"edf5d166-7a55-4ac0-ab98-ff0f503fd9a6","Type":"ContainerStarted","Data":"180619a27f994f9612d9ab9a5d139e97343c3b2d10beac31d69ce5ef9bb3d33a"} Jan 29 12:24:22 crc kubenswrapper[4753]: I0129 12:24:22.032213 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-index-qzjgm" event={"ID":"edf5d166-7a55-4ac0-ab98-ff0f503fd9a6","Type":"ContainerStarted","Data":"85106337f75ed28f3f76698f58fc7ceeafba753148daf1aab371218951585671"} Jan 29 12:24:22 crc kubenswrapper[4753]: I0129 12:24:22.053181 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-index-qzjgm" podStartSLOduration=2.921267542 podStartE2EDuration="12.053160773s" podCreationTimestamp="2026-01-29 12:24:10 +0000 UTC" firstStartedPulling="2026-01-29 12:24:11.870292876 +0000 UTC m=+1066.122374331" lastFinishedPulling="2026-01-29 12:24:21.002186107 +0000 UTC m=+1075.254267562" observedRunningTime="2026-01-29 12:24:22.04911999 +0000 UTC m=+1076.301201445" watchObservedRunningTime="2026-01-29 12:24:22.053160773 +0000 UTC m=+1076.305242228" Jan 29 12:24:27 crc kubenswrapper[4753]: I0129 12:24:27.066059 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/rabbitmq-server-0" event={"ID":"e0033592-f961-4066-9026-3645b09f9524","Type":"ContainerStarted","Data":"e1a5e4d95d6e246f97e352beda8f240200743bb7e6d02fdc3e76e9f4b225d812"} Jan 29 12:24:31 crc kubenswrapper[4753]: I0129 12:24:31.489662 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/keystone-operator-index-qzjgm" Jan 29 12:24:31 crc kubenswrapper[4753]: I0129 12:24:31.490092 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-index-qzjgm" Jan 29 12:24:31 crc kubenswrapper[4753]: I0129 12:24:31.520793 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/keystone-operator-index-qzjgm" Jan 29 12:24:32 crc kubenswrapper[4753]: I0129 12:24:32.228681 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-index-qzjgm" Jan 29 12:24:42 crc kubenswrapper[4753]: I0129 12:24:42.370386 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef5xgr2"] Jan 29 12:24:42 crc kubenswrapper[4753]: I0129 12:24:42.373993 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef5xgr2" Jan 29 12:24:42 crc kubenswrapper[4753]: I0129 12:24:42.381935 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-j2jmz" Jan 29 12:24:42 crc kubenswrapper[4753]: I0129 12:24:42.391947 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef5xgr2"] Jan 29 12:24:42 crc kubenswrapper[4753]: I0129 12:24:42.549827 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a3731426-3387-4e13-8aae-52f83d283335-util\") pod \"b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef5xgr2\" (UID: \"a3731426-3387-4e13-8aae-52f83d283335\") " pod="openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef5xgr2" Jan 29 12:24:42 crc kubenswrapper[4753]: I0129 12:24:42.550095 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a3731426-3387-4e13-8aae-52f83d283335-bundle\") pod \"b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef5xgr2\" (UID: \"a3731426-3387-4e13-8aae-52f83d283335\") " pod="openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef5xgr2" Jan 29 12:24:42 crc kubenswrapper[4753]: I0129 12:24:42.550213 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d46pl\" (UniqueName: \"kubernetes.io/projected/a3731426-3387-4e13-8aae-52f83d283335-kube-api-access-d46pl\") pod \"b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef5xgr2\" (UID: \"a3731426-3387-4e13-8aae-52f83d283335\") " pod="openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef5xgr2" Jan 29 12:24:42 crc kubenswrapper[4753]: I0129 12:24:42.651895 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a3731426-3387-4e13-8aae-52f83d283335-util\") pod \"b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef5xgr2\" (UID: \"a3731426-3387-4e13-8aae-52f83d283335\") " pod="openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef5xgr2" Jan 29 12:24:42 crc kubenswrapper[4753]: I0129 12:24:42.651996 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a3731426-3387-4e13-8aae-52f83d283335-bundle\") pod \"b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef5xgr2\" (UID: \"a3731426-3387-4e13-8aae-52f83d283335\") " pod="openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef5xgr2" Jan 29 12:24:42 crc kubenswrapper[4753]: I0129 12:24:42.652030 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d46pl\" (UniqueName: \"kubernetes.io/projected/a3731426-3387-4e13-8aae-52f83d283335-kube-api-access-d46pl\") pod \"b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef5xgr2\" (UID: \"a3731426-3387-4e13-8aae-52f83d283335\") " pod="openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef5xgr2" Jan 29 12:24:42 crc kubenswrapper[4753]: I0129 12:24:42.652543 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/a3731426-3387-4e13-8aae-52f83d283335-util\") pod \"b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef5xgr2\" (UID: \"a3731426-3387-4e13-8aae-52f83d283335\") " pod="openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef5xgr2" Jan 29 12:24:42 crc kubenswrapper[4753]: I0129 12:24:42.652654 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a3731426-3387-4e13-8aae-52f83d283335-bundle\") pod \"b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef5xgr2\" (UID: \"a3731426-3387-4e13-8aae-52f83d283335\") " pod="openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef5xgr2" Jan 29 12:24:42 crc kubenswrapper[4753]: I0129 12:24:42.681461 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d46pl\" (UniqueName: \"kubernetes.io/projected/a3731426-3387-4e13-8aae-52f83d283335-kube-api-access-d46pl\") pod \"b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef5xgr2\" (UID: \"a3731426-3387-4e13-8aae-52f83d283335\") " pod="openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef5xgr2" Jan 29 12:24:42 crc kubenswrapper[4753]: I0129 12:24:42.697795 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef5xgr2" Jan 29 12:24:43 crc kubenswrapper[4753]: I0129 12:24:43.251280 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef5xgr2"] Jan 29 12:24:43 crc kubenswrapper[4753]: W0129 12:24:43.255862 4753 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda3731426_3387_4e13_8aae_52f83d283335.slice/crio-01758befdc7bc7a93edd28b67621721cd59d81047c35a9687f8283c0344eaecd WatchSource:0}: Error finding container 01758befdc7bc7a93edd28b67621721cd59d81047c35a9687f8283c0344eaecd: Status 404 returned error can't find the container with id 01758befdc7bc7a93edd28b67621721cd59d81047c35a9687f8283c0344eaecd Jan 29 12:24:44 crc kubenswrapper[4753]: I0129 12:24:44.201217 4753 generic.go:334] "Generic (PLEG): container finished" podID="a3731426-3387-4e13-8aae-52f83d283335" containerID="2d593656e256ee265e41ce2d191c01b17717b06fe5488bad59cfdb73f4e4de60" exitCode=0 Jan 29 12:24:44 crc kubenswrapper[4753]: I0129 12:24:44.201295 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef5xgr2" event={"ID":"a3731426-3387-4e13-8aae-52f83d283335","Type":"ContainerDied","Data":"2d593656e256ee265e41ce2d191c01b17717b06fe5488bad59cfdb73f4e4de60"} Jan 29 12:24:44 crc kubenswrapper[4753]: I0129 12:24:44.201339 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef5xgr2" event={"ID":"a3731426-3387-4e13-8aae-52f83d283335","Type":"ContainerStarted","Data":"01758befdc7bc7a93edd28b67621721cd59d81047c35a9687f8283c0344eaecd"} Jan 29 12:24:45 crc kubenswrapper[4753]: I0129 12:24:45.212443 4753 generic.go:334] "Generic (PLEG): container finished" podID="a3731426-3387-4e13-8aae-52f83d283335" containerID="80cd849636ed48f0ac6878f7624b5a64c86305ea76c40e4b2d8a76897f16be26" exitCode=0 Jan 29 12:24:45 crc kubenswrapper[4753]: I0129 12:24:45.212533 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef5xgr2" event={"ID":"a3731426-3387-4e13-8aae-52f83d283335","Type":"ContainerDied","Data":"80cd849636ed48f0ac6878f7624b5a64c86305ea76c40e4b2d8a76897f16be26"} Jan 29 12:24:46 crc kubenswrapper[4753]: I0129 12:24:46.238207 4753 generic.go:334] "Generic (PLEG): container finished" podID="a3731426-3387-4e13-8aae-52f83d283335" containerID="214a7cdb6552aee101ffa784b2a2010895c902ea2346b722c0f9d1c73e432c87" exitCode=0 Jan 29 12:24:46 crc kubenswrapper[4753]: I0129 12:24:46.239571 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef5xgr2" event={"ID":"a3731426-3387-4e13-8aae-52f83d283335","Type":"ContainerDied","Data":"214a7cdb6552aee101ffa784b2a2010895c902ea2346b722c0f9d1c73e432c87"} Jan 29 12:24:47 crc kubenswrapper[4753]: I0129 12:24:47.508524 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef5xgr2" Jan 29 12:24:47 crc kubenswrapper[4753]: I0129 12:24:47.633134 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d46pl\" (UniqueName: \"kubernetes.io/projected/a3731426-3387-4e13-8aae-52f83d283335-kube-api-access-d46pl\") pod \"a3731426-3387-4e13-8aae-52f83d283335\" (UID: \"a3731426-3387-4e13-8aae-52f83d283335\") " Jan 29 12:24:47 crc kubenswrapper[4753]: I0129 12:24:47.633277 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a3731426-3387-4e13-8aae-52f83d283335-util\") pod \"a3731426-3387-4e13-8aae-52f83d283335\" (UID: \"a3731426-3387-4e13-8aae-52f83d283335\") " Jan 29 12:24:47 crc kubenswrapper[4753]: I0129 12:24:47.633342 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a3731426-3387-4e13-8aae-52f83d283335-bundle\") pod \"a3731426-3387-4e13-8aae-52f83d283335\" (UID: \"a3731426-3387-4e13-8aae-52f83d283335\") " Jan 29 12:24:47 crc kubenswrapper[4753]: I0129 12:24:47.634273 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a3731426-3387-4e13-8aae-52f83d283335-bundle" (OuterVolumeSpecName: "bundle") pod "a3731426-3387-4e13-8aae-52f83d283335" (UID: "a3731426-3387-4e13-8aae-52f83d283335"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:24:47 crc kubenswrapper[4753]: I0129 12:24:47.642447 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a3731426-3387-4e13-8aae-52f83d283335-kube-api-access-d46pl" (OuterVolumeSpecName: "kube-api-access-d46pl") pod "a3731426-3387-4e13-8aae-52f83d283335" (UID: "a3731426-3387-4e13-8aae-52f83d283335"). InnerVolumeSpecName "kube-api-access-d46pl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:24:47 crc kubenswrapper[4753]: I0129 12:24:47.659640 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a3731426-3387-4e13-8aae-52f83d283335-util" (OuterVolumeSpecName: "util") pod "a3731426-3387-4e13-8aae-52f83d283335" (UID: "a3731426-3387-4e13-8aae-52f83d283335"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:24:47 crc kubenswrapper[4753]: I0129 12:24:47.735550 4753 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a3731426-3387-4e13-8aae-52f83d283335-util\") on node \"crc\" DevicePath \"\"" Jan 29 12:24:47 crc kubenswrapper[4753]: I0129 12:24:47.735591 4753 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a3731426-3387-4e13-8aae-52f83d283335-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:24:47 crc kubenswrapper[4753]: I0129 12:24:47.735600 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d46pl\" (UniqueName: \"kubernetes.io/projected/a3731426-3387-4e13-8aae-52f83d283335-kube-api-access-d46pl\") on node \"crc\" DevicePath \"\"" Jan 29 12:24:48 crc kubenswrapper[4753]: I0129 12:24:48.258888 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef5xgr2" event={"ID":"a3731426-3387-4e13-8aae-52f83d283335","Type":"ContainerDied","Data":"01758befdc7bc7a93edd28b67621721cd59d81047c35a9687f8283c0344eaecd"} Jan 29 12:24:48 crc kubenswrapper[4753]: I0129 12:24:48.259480 4753 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="01758befdc7bc7a93edd28b67621721cd59d81047c35a9687f8283c0344eaecd" Jan 29 12:24:48 crc kubenswrapper[4753]: I0129 12:24:48.258982 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef5xgr2" Jan 29 12:24:59 crc kubenswrapper[4753]: I0129 12:24:59.355365 4753 generic.go:334] "Generic (PLEG): container finished" podID="e0033592-f961-4066-9026-3645b09f9524" containerID="e1a5e4d95d6e246f97e352beda8f240200743bb7e6d02fdc3e76e9f4b225d812" exitCode=0 Jan 29 12:24:59 crc kubenswrapper[4753]: I0129 12:24:59.355586 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/rabbitmq-server-0" event={"ID":"e0033592-f961-4066-9026-3645b09f9524","Type":"ContainerDied","Data":"e1a5e4d95d6e246f97e352beda8f240200743bb7e6d02fdc3e76e9f4b225d812"} Jan 29 12:24:59 crc kubenswrapper[4753]: I0129 12:24:59.668323 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-6c7b546b79-zwbp5"] Jan 29 12:24:59 crc kubenswrapper[4753]: E0129 12:24:59.668912 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a3731426-3387-4e13-8aae-52f83d283335" containerName="util" Jan 29 12:24:59 crc kubenswrapper[4753]: I0129 12:24:59.668927 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="a3731426-3387-4e13-8aae-52f83d283335" containerName="util" Jan 29 12:24:59 crc kubenswrapper[4753]: E0129 12:24:59.668943 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a3731426-3387-4e13-8aae-52f83d283335" containerName="pull" Jan 29 12:24:59 crc kubenswrapper[4753]: I0129 12:24:59.668948 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="a3731426-3387-4e13-8aae-52f83d283335" containerName="pull" Jan 29 12:24:59 crc kubenswrapper[4753]: E0129 12:24:59.668962 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a3731426-3387-4e13-8aae-52f83d283335" containerName="extract" Jan 29 12:24:59 crc kubenswrapper[4753]: I0129 12:24:59.668968 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="a3731426-3387-4e13-8aae-52f83d283335" containerName="extract" Jan 29 
12:24:59 crc kubenswrapper[4753]: I0129 12:24:59.669080 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="a3731426-3387-4e13-8aae-52f83d283335" containerName="extract" Jan 29 12:24:59 crc kubenswrapper[4753]: I0129 12:24:59.669540 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-6c7b546b79-zwbp5" Jan 29 12:24:59 crc kubenswrapper[4753]: I0129 12:24:59.672819 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-service-cert" Jan 29 12:24:59 crc kubenswrapper[4753]: I0129 12:24:59.673058 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-s2c9b" Jan 29 12:24:59 crc kubenswrapper[4753]: I0129 12:24:59.734356 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-6c7b546b79-zwbp5"] Jan 29 12:24:59 crc kubenswrapper[4753]: I0129 12:24:59.793746 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d77d4839-0017-46ba-8f19-d8ddc01a1878-apiservice-cert\") pod \"keystone-operator-controller-manager-6c7b546b79-zwbp5\" (UID: \"d77d4839-0017-46ba-8f19-d8ddc01a1878\") " pod="openstack-operators/keystone-operator-controller-manager-6c7b546b79-zwbp5" Jan 29 12:24:59 crc kubenswrapper[4753]: I0129 12:24:59.793836 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d77d4839-0017-46ba-8f19-d8ddc01a1878-webhook-cert\") pod \"keystone-operator-controller-manager-6c7b546b79-zwbp5\" (UID: \"d77d4839-0017-46ba-8f19-d8ddc01a1878\") " pod="openstack-operators/keystone-operator-controller-manager-6c7b546b79-zwbp5" Jan 29 12:24:59 crc kubenswrapper[4753]: I0129 12:24:59.793912 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t9k5t\" (UniqueName: \"kubernetes.io/projected/d77d4839-0017-46ba-8f19-d8ddc01a1878-kube-api-access-t9k5t\") pod \"keystone-operator-controller-manager-6c7b546b79-zwbp5\" (UID: \"d77d4839-0017-46ba-8f19-d8ddc01a1878\") " pod="openstack-operators/keystone-operator-controller-manager-6c7b546b79-zwbp5" Jan 29 12:24:59 crc kubenswrapper[4753]: I0129 12:24:59.895344 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d77d4839-0017-46ba-8f19-d8ddc01a1878-apiservice-cert\") pod \"keystone-operator-controller-manager-6c7b546b79-zwbp5\" (UID: \"d77d4839-0017-46ba-8f19-d8ddc01a1878\") " pod="openstack-operators/keystone-operator-controller-manager-6c7b546b79-zwbp5" Jan 29 12:24:59 crc kubenswrapper[4753]: I0129 12:24:59.895421 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d77d4839-0017-46ba-8f19-d8ddc01a1878-webhook-cert\") pod \"keystone-operator-controller-manager-6c7b546b79-zwbp5\" (UID: \"d77d4839-0017-46ba-8f19-d8ddc01a1878\") " pod="openstack-operators/keystone-operator-controller-manager-6c7b546b79-zwbp5" Jan 29 12:24:59 crc kubenswrapper[4753]: I0129 12:24:59.895467 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t9k5t\" (UniqueName: 
\"kubernetes.io/projected/d77d4839-0017-46ba-8f19-d8ddc01a1878-kube-api-access-t9k5t\") pod \"keystone-operator-controller-manager-6c7b546b79-zwbp5\" (UID: \"d77d4839-0017-46ba-8f19-d8ddc01a1878\") " pod="openstack-operators/keystone-operator-controller-manager-6c7b546b79-zwbp5" Jan 29 12:24:59 crc kubenswrapper[4753]: I0129 12:24:59.905246 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d77d4839-0017-46ba-8f19-d8ddc01a1878-webhook-cert\") pod \"keystone-operator-controller-manager-6c7b546b79-zwbp5\" (UID: \"d77d4839-0017-46ba-8f19-d8ddc01a1878\") " pod="openstack-operators/keystone-operator-controller-manager-6c7b546b79-zwbp5" Jan 29 12:24:59 crc kubenswrapper[4753]: I0129 12:24:59.918263 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d77d4839-0017-46ba-8f19-d8ddc01a1878-apiservice-cert\") pod \"keystone-operator-controller-manager-6c7b546b79-zwbp5\" (UID: \"d77d4839-0017-46ba-8f19-d8ddc01a1878\") " pod="openstack-operators/keystone-operator-controller-manager-6c7b546b79-zwbp5" Jan 29 12:24:59 crc kubenswrapper[4753]: I0129 12:24:59.921002 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t9k5t\" (UniqueName: \"kubernetes.io/projected/d77d4839-0017-46ba-8f19-d8ddc01a1878-kube-api-access-t9k5t\") pod \"keystone-operator-controller-manager-6c7b546b79-zwbp5\" (UID: \"d77d4839-0017-46ba-8f19-d8ddc01a1878\") " pod="openstack-operators/keystone-operator-controller-manager-6c7b546b79-zwbp5" Jan 29 12:24:59 crc kubenswrapper[4753]: I0129 12:24:59.986247 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-6c7b546b79-zwbp5" Jan 29 12:25:00 crc kubenswrapper[4753]: I0129 12:25:00.331494 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-6c7b546b79-zwbp5"] Jan 29 12:25:00 crc kubenswrapper[4753]: W0129 12:25:00.339072 4753 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd77d4839_0017_46ba_8f19_d8ddc01a1878.slice/crio-d3e8709eb81a390aa67b6cd40903280f57912773a97b2b770cdb7187a1ad3e95 WatchSource:0}: Error finding container d3e8709eb81a390aa67b6cd40903280f57912773a97b2b770cdb7187a1ad3e95: Status 404 returned error can't find the container with id d3e8709eb81a390aa67b6cd40903280f57912773a97b2b770cdb7187a1ad3e95 Jan 29 12:25:00 crc kubenswrapper[4753]: I0129 12:25:00.341997 4753 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 29 12:25:00 crc kubenswrapper[4753]: I0129 12:25:00.365920 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-6c7b546b79-zwbp5" event={"ID":"d77d4839-0017-46ba-8f19-d8ddc01a1878","Type":"ContainerStarted","Data":"d3e8709eb81a390aa67b6cd40903280f57912773a97b2b770cdb7187a1ad3e95"} Jan 29 12:25:00 crc kubenswrapper[4753]: I0129 12:25:00.368335 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/rabbitmq-server-0" event={"ID":"e0033592-f961-4066-9026-3645b09f9524","Type":"ContainerStarted","Data":"108d86555b06dd78575168c0015609068de2878c9d5b7f8b53f63cb72ed39786"} Jan 29 12:25:00 crc kubenswrapper[4753]: I0129 12:25:00.368574 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="swift-kuttl-tests/rabbitmq-server-0" Jan 29 12:25:00 crc kubenswrapper[4753]: I0129 12:25:00.421157 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="swift-kuttl-tests/rabbitmq-server-0" podStartSLOduration=41.42566383 podStartE2EDuration="52.421133292s" podCreationTimestamp="2026-01-29 12:24:08 +0000 UTC" firstStartedPulling="2026-01-29 12:24:10.00550215 +0000 UTC m=+1064.257583625" lastFinishedPulling="2026-01-29 12:24:21.000971632 +0000 UTC m=+1075.253053087" observedRunningTime="2026-01-29 12:25:00.417083518 +0000 UTC m=+1114.669164983" watchObservedRunningTime="2026-01-29 12:25:00.421133292 +0000 UTC m=+1114.673214747" Jan 29 12:25:06 crc kubenswrapper[4753]: I0129 12:25:06.597140 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-6c7b546b79-zwbp5" event={"ID":"d77d4839-0017-46ba-8f19-d8ddc01a1878","Type":"ContainerStarted","Data":"7122b083e3807974cb87d059398a493bae9ffc5a54e1b8b45993cf9c52e14311"} Jan 29 12:25:06 crc kubenswrapper[4753]: I0129 12:25:06.597523 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-6c7b546b79-zwbp5" Jan 29 12:25:06 crc kubenswrapper[4753]: I0129 12:25:06.624959 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-6c7b546b79-zwbp5" podStartSLOduration=2.67362573 podStartE2EDuration="7.624860452s" podCreationTimestamp="2026-01-29 12:24:59 +0000 UTC" firstStartedPulling="2026-01-29 12:25:00.341630754 +0000 UTC m=+1114.593712209" lastFinishedPulling="2026-01-29 12:25:05.292865476 +0000 UTC m=+1119.544946931" observedRunningTime="2026-01-29 12:25:06.619385008 +0000 UTC m=+1120.871466473" watchObservedRunningTime="2026-01-29 12:25:06.624860452 +0000 UTC m=+1120.876941907" Jan 29 12:25:09 crc kubenswrapper[4753]: I0129 12:25:09.521472 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="swift-kuttl-tests/rabbitmq-server-0" Jan 29 12:25:19 crc kubenswrapper[4753]: I0129 12:25:19.993631 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-6c7b546b79-zwbp5" Jan 29 12:25:24 crc kubenswrapper[4753]: I0129 12:25:24.881930 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/keystone-db-create-ll7kp"] Jan 29 12:25:24 crc kubenswrapper[4753]: I0129 12:25:24.883627 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/keystone-db-create-ll7kp" Jan 29 12:25:24 crc kubenswrapper[4753]: I0129 12:25:24.897904 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/keystone-dfb1-account-create-update-sz78h"] Jan 29 12:25:24 crc kubenswrapper[4753]: I0129 12:25:24.898685 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/keystone-dfb1-account-create-update-sz78h" Jan 29 12:25:24 crc kubenswrapper[4753]: I0129 12:25:24.903875 4753 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"keystone-db-secret" Jan 29 12:25:24 crc kubenswrapper[4753]: I0129 12:25:24.917046 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/keystone-db-create-ll7kp"] Jan 29 12:25:24 crc kubenswrapper[4753]: I0129 12:25:24.931050 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/keystone-dfb1-account-create-update-sz78h"] Jan 29 12:25:25 crc kubenswrapper[4753]: I0129 12:25:25.051277 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jvnk9\" (UniqueName: \"kubernetes.io/projected/924e55f9-5adb-46df-88dc-6a3ffcef9fba-kube-api-access-jvnk9\") pod \"keystone-dfb1-account-create-update-sz78h\" (UID: \"924e55f9-5adb-46df-88dc-6a3ffcef9fba\") " pod="swift-kuttl-tests/keystone-dfb1-account-create-update-sz78h" Jan 29 12:25:25 crc kubenswrapper[4753]: I0129 12:25:25.051362 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/078e56f6-dfd4-4ca7-81c4-2d4a5efd44a3-operator-scripts\") pod \"keystone-db-create-ll7kp\" (UID: \"078e56f6-dfd4-4ca7-81c4-2d4a5efd44a3\") " pod="swift-kuttl-tests/keystone-db-create-ll7kp" Jan 29 12:25:25 crc kubenswrapper[4753]: I0129 12:25:25.051392 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6l8cf\" (UniqueName: \"kubernetes.io/projected/078e56f6-dfd4-4ca7-81c4-2d4a5efd44a3-kube-api-access-6l8cf\") pod \"keystone-db-create-ll7kp\" (UID: \"078e56f6-dfd4-4ca7-81c4-2d4a5efd44a3\") " pod="swift-kuttl-tests/keystone-db-create-ll7kp" Jan 29 12:25:25 crc kubenswrapper[4753]: I0129 12:25:25.051491 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/924e55f9-5adb-46df-88dc-6a3ffcef9fba-operator-scripts\") pod \"keystone-dfb1-account-create-update-sz78h\" (UID: \"924e55f9-5adb-46df-88dc-6a3ffcef9fba\") " pod="swift-kuttl-tests/keystone-dfb1-account-create-update-sz78h" Jan 29 12:25:25 crc kubenswrapper[4753]: I0129 12:25:25.153418 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jvnk9\" (UniqueName: \"kubernetes.io/projected/924e55f9-5adb-46df-88dc-6a3ffcef9fba-kube-api-access-jvnk9\") pod \"keystone-dfb1-account-create-update-sz78h\" (UID: \"924e55f9-5adb-46df-88dc-6a3ffcef9fba\") " pod="swift-kuttl-tests/keystone-dfb1-account-create-update-sz78h" Jan 29 12:25:25 crc kubenswrapper[4753]: I0129 12:25:25.153854 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/078e56f6-dfd4-4ca7-81c4-2d4a5efd44a3-operator-scripts\") pod \"keystone-db-create-ll7kp\" (UID: \"078e56f6-dfd4-4ca7-81c4-2d4a5efd44a3\") " pod="swift-kuttl-tests/keystone-db-create-ll7kp" Jan 29 12:25:25 crc kubenswrapper[4753]: I0129 12:25:25.153879 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6l8cf\" (UniqueName: \"kubernetes.io/projected/078e56f6-dfd4-4ca7-81c4-2d4a5efd44a3-kube-api-access-6l8cf\") pod \"keystone-db-create-ll7kp\" (UID: \"078e56f6-dfd4-4ca7-81c4-2d4a5efd44a3\") " 
pod="swift-kuttl-tests/keystone-db-create-ll7kp" Jan 29 12:25:25 crc kubenswrapper[4753]: I0129 12:25:25.154687 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/078e56f6-dfd4-4ca7-81c4-2d4a5efd44a3-operator-scripts\") pod \"keystone-db-create-ll7kp\" (UID: \"078e56f6-dfd4-4ca7-81c4-2d4a5efd44a3\") " pod="swift-kuttl-tests/keystone-db-create-ll7kp" Jan 29 12:25:25 crc kubenswrapper[4753]: I0129 12:25:25.154834 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/924e55f9-5adb-46df-88dc-6a3ffcef9fba-operator-scripts\") pod \"keystone-dfb1-account-create-update-sz78h\" (UID: \"924e55f9-5adb-46df-88dc-6a3ffcef9fba\") " pod="swift-kuttl-tests/keystone-dfb1-account-create-update-sz78h" Jan 29 12:25:25 crc kubenswrapper[4753]: I0129 12:25:25.155378 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/924e55f9-5adb-46df-88dc-6a3ffcef9fba-operator-scripts\") pod \"keystone-dfb1-account-create-update-sz78h\" (UID: \"924e55f9-5adb-46df-88dc-6a3ffcef9fba\") " pod="swift-kuttl-tests/keystone-dfb1-account-create-update-sz78h" Jan 29 12:25:25 crc kubenswrapper[4753]: I0129 12:25:25.195680 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jvnk9\" (UniqueName: \"kubernetes.io/projected/924e55f9-5adb-46df-88dc-6a3ffcef9fba-kube-api-access-jvnk9\") pod \"keystone-dfb1-account-create-update-sz78h\" (UID: \"924e55f9-5adb-46df-88dc-6a3ffcef9fba\") " pod="swift-kuttl-tests/keystone-dfb1-account-create-update-sz78h" Jan 29 12:25:25 crc kubenswrapper[4753]: I0129 12:25:25.197129 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6l8cf\" (UniqueName: \"kubernetes.io/projected/078e56f6-dfd4-4ca7-81c4-2d4a5efd44a3-kube-api-access-6l8cf\") pod \"keystone-db-create-ll7kp\" (UID: \"078e56f6-dfd4-4ca7-81c4-2d4a5efd44a3\") " pod="swift-kuttl-tests/keystone-db-create-ll7kp" Jan 29 12:25:25 crc kubenswrapper[4753]: I0129 12:25:25.206140 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/keystone-db-create-ll7kp" Jan 29 12:25:25 crc kubenswrapper[4753]: I0129 12:25:25.221904 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/keystone-dfb1-account-create-update-sz78h" Jan 29 12:25:26 crc kubenswrapper[4753]: I0129 12:25:26.617503 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/keystone-dfb1-account-create-update-sz78h"] Jan 29 12:25:26 crc kubenswrapper[4753]: I0129 12:25:26.620297 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/keystone-db-create-ll7kp"] Jan 29 12:25:27 crc kubenswrapper[4753]: I0129 12:25:27.060673 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/keystone-db-create-ll7kp" event={"ID":"078e56f6-dfd4-4ca7-81c4-2d4a5efd44a3","Type":"ContainerStarted","Data":"14a0794c4dd096cd14b5c2b51ef1d350510fda6035786468620cf1f2d1d70dc7"} Jan 29 12:25:27 crc kubenswrapper[4753]: I0129 12:25:27.061037 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/keystone-db-create-ll7kp" event={"ID":"078e56f6-dfd4-4ca7-81c4-2d4a5efd44a3","Type":"ContainerStarted","Data":"0a726cd17293d89b645c8c39f36e3a0b7eb4dd43776cf592a9581d3dabfdc5f3"} Jan 29 12:25:27 crc kubenswrapper[4753]: I0129 12:25:27.063917 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/keystone-dfb1-account-create-update-sz78h" event={"ID":"924e55f9-5adb-46df-88dc-6a3ffcef9fba","Type":"ContainerStarted","Data":"b26040703dd15e8e80da2fa0771fc55a873b8bd701de901596d2cfd5a55c1d0b"} Jan 29 12:25:27 crc kubenswrapper[4753]: I0129 12:25:27.063956 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/keystone-dfb1-account-create-update-sz78h" event={"ID":"924e55f9-5adb-46df-88dc-6a3ffcef9fba","Type":"ContainerStarted","Data":"61c5817bb41969f09dc5968d7b36275d7a085de8106e935b760c43114388c7ad"} Jan 29 12:25:27 crc kubenswrapper[4753]: I0129 12:25:27.084350 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="swift-kuttl-tests/keystone-db-create-ll7kp" podStartSLOduration=3.084323496 podStartE2EDuration="3.084323496s" podCreationTimestamp="2026-01-29 12:25:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:25:27.079694386 +0000 UTC m=+1141.331775851" watchObservedRunningTime="2026-01-29 12:25:27.084323496 +0000 UTC m=+1141.336404951" Jan 29 12:25:28 crc kubenswrapper[4753]: I0129 12:25:28.074633 4753 generic.go:334] "Generic (PLEG): container finished" podID="924e55f9-5adb-46df-88dc-6a3ffcef9fba" containerID="b26040703dd15e8e80da2fa0771fc55a873b8bd701de901596d2cfd5a55c1d0b" exitCode=0 Jan 29 12:25:28 crc kubenswrapper[4753]: I0129 12:25:28.074706 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/keystone-dfb1-account-create-update-sz78h" event={"ID":"924e55f9-5adb-46df-88dc-6a3ffcef9fba","Type":"ContainerDied","Data":"b26040703dd15e8e80da2fa0771fc55a873b8bd701de901596d2cfd5a55c1d0b"} Jan 29 12:25:28 crc kubenswrapper[4753]: I0129 12:25:28.080331 4753 generic.go:334] "Generic (PLEG): container finished" podID="078e56f6-dfd4-4ca7-81c4-2d4a5efd44a3" containerID="14a0794c4dd096cd14b5c2b51ef1d350510fda6035786468620cf1f2d1d70dc7" exitCode=0 Jan 29 12:25:28 crc kubenswrapper[4753]: I0129 12:25:28.080528 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/keystone-db-create-ll7kp" event={"ID":"078e56f6-dfd4-4ca7-81c4-2d4a5efd44a3","Type":"ContainerDied","Data":"14a0794c4dd096cd14b5c2b51ef1d350510fda6035786468620cf1f2d1d70dc7"} Jan 29 12:25:30 crc kubenswrapper[4753]: I0129 12:25:30.196086 
4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/keystone-dfb1-account-create-update-sz78h" Jan 29 12:25:30 crc kubenswrapper[4753]: I0129 12:25:30.201313 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/keystone-db-create-ll7kp" Jan 29 12:25:30 crc kubenswrapper[4753]: I0129 12:25:30.231738 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6l8cf\" (UniqueName: \"kubernetes.io/projected/078e56f6-dfd4-4ca7-81c4-2d4a5efd44a3-kube-api-access-6l8cf\") pod \"078e56f6-dfd4-4ca7-81c4-2d4a5efd44a3\" (UID: \"078e56f6-dfd4-4ca7-81c4-2d4a5efd44a3\") " Jan 29 12:25:30 crc kubenswrapper[4753]: I0129 12:25:30.231856 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/078e56f6-dfd4-4ca7-81c4-2d4a5efd44a3-operator-scripts\") pod \"078e56f6-dfd4-4ca7-81c4-2d4a5efd44a3\" (UID: \"078e56f6-dfd4-4ca7-81c4-2d4a5efd44a3\") " Jan 29 12:25:30 crc kubenswrapper[4753]: I0129 12:25:30.231886 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/924e55f9-5adb-46df-88dc-6a3ffcef9fba-operator-scripts\") pod \"924e55f9-5adb-46df-88dc-6a3ffcef9fba\" (UID: \"924e55f9-5adb-46df-88dc-6a3ffcef9fba\") " Jan 29 12:25:30 crc kubenswrapper[4753]: I0129 12:25:30.231973 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jvnk9\" (UniqueName: \"kubernetes.io/projected/924e55f9-5adb-46df-88dc-6a3ffcef9fba-kube-api-access-jvnk9\") pod \"924e55f9-5adb-46df-88dc-6a3ffcef9fba\" (UID: \"924e55f9-5adb-46df-88dc-6a3ffcef9fba\") " Jan 29 12:25:30 crc kubenswrapper[4753]: I0129 12:25:30.237168 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/078e56f6-dfd4-4ca7-81c4-2d4a5efd44a3-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "078e56f6-dfd4-4ca7-81c4-2d4a5efd44a3" (UID: "078e56f6-dfd4-4ca7-81c4-2d4a5efd44a3"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:25:30 crc kubenswrapper[4753]: I0129 12:25:30.237928 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/924e55f9-5adb-46df-88dc-6a3ffcef9fba-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "924e55f9-5adb-46df-88dc-6a3ffcef9fba" (UID: "924e55f9-5adb-46df-88dc-6a3ffcef9fba"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:25:30 crc kubenswrapper[4753]: I0129 12:25:30.238516 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/078e56f6-dfd4-4ca7-81c4-2d4a5efd44a3-kube-api-access-6l8cf" (OuterVolumeSpecName: "kube-api-access-6l8cf") pod "078e56f6-dfd4-4ca7-81c4-2d4a5efd44a3" (UID: "078e56f6-dfd4-4ca7-81c4-2d4a5efd44a3"). InnerVolumeSpecName "kube-api-access-6l8cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:25:30 crc kubenswrapper[4753]: I0129 12:25:30.239527 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/924e55f9-5adb-46df-88dc-6a3ffcef9fba-kube-api-access-jvnk9" (OuterVolumeSpecName: "kube-api-access-jvnk9") pod "924e55f9-5adb-46df-88dc-6a3ffcef9fba" (UID: "924e55f9-5adb-46df-88dc-6a3ffcef9fba"). 
InnerVolumeSpecName "kube-api-access-jvnk9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:25:30 crc kubenswrapper[4753]: I0129 12:25:30.333851 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jvnk9\" (UniqueName: \"kubernetes.io/projected/924e55f9-5adb-46df-88dc-6a3ffcef9fba-kube-api-access-jvnk9\") on node \"crc\" DevicePath \"\"" Jan 29 12:25:30 crc kubenswrapper[4753]: I0129 12:25:30.333888 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6l8cf\" (UniqueName: \"kubernetes.io/projected/078e56f6-dfd4-4ca7-81c4-2d4a5efd44a3-kube-api-access-6l8cf\") on node \"crc\" DevicePath \"\"" Jan 29 12:25:30 crc kubenswrapper[4753]: I0129 12:25:30.333898 4753 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/078e56f6-dfd4-4ca7-81c4-2d4a5efd44a3-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:25:30 crc kubenswrapper[4753]: I0129 12:25:30.333907 4753 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/924e55f9-5adb-46df-88dc-6a3ffcef9fba-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:25:30 crc kubenswrapper[4753]: I0129 12:25:30.931042 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-index-bjfgl"] Jan 29 12:25:30 crc kubenswrapper[4753]: E0129 12:25:30.931418 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="078e56f6-dfd4-4ca7-81c4-2d4a5efd44a3" containerName="mariadb-database-create" Jan 29 12:25:30 crc kubenswrapper[4753]: I0129 12:25:30.931462 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="078e56f6-dfd4-4ca7-81c4-2d4a5efd44a3" containerName="mariadb-database-create" Jan 29 12:25:30 crc kubenswrapper[4753]: E0129 12:25:30.931489 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="924e55f9-5adb-46df-88dc-6a3ffcef9fba" containerName="mariadb-account-create-update" Jan 29 12:25:30 crc kubenswrapper[4753]: I0129 12:25:30.931497 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="924e55f9-5adb-46df-88dc-6a3ffcef9fba" containerName="mariadb-account-create-update" Jan 29 12:25:30 crc kubenswrapper[4753]: I0129 12:25:30.931671 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="078e56f6-dfd4-4ca7-81c4-2d4a5efd44a3" containerName="mariadb-database-create" Jan 29 12:25:30 crc kubenswrapper[4753]: I0129 12:25:30.931697 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="924e55f9-5adb-46df-88dc-6a3ffcef9fba" containerName="mariadb-account-create-update" Jan 29 12:25:30 crc kubenswrapper[4753]: I0129 12:25:30.932305 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/barbican-operator-index-bjfgl" Jan 29 12:25:30 crc kubenswrapper[4753]: I0129 12:25:30.934929 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-index-dockercfg-twswv" Jan 29 12:25:30 crc kubenswrapper[4753]: I0129 12:25:30.941294 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-index-bjfgl"] Jan 29 12:25:31 crc kubenswrapper[4753]: I0129 12:25:31.045454 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4rlkh\" (UniqueName: \"kubernetes.io/projected/c0945dd3-02c0-42cd-8a01-5ceba63d4fb7-kube-api-access-4rlkh\") pod \"barbican-operator-index-bjfgl\" (UID: \"c0945dd3-02c0-42cd-8a01-5ceba63d4fb7\") " pod="openstack-operators/barbican-operator-index-bjfgl" Jan 29 12:25:31 crc kubenswrapper[4753]: I0129 12:25:31.112648 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/keystone-db-create-ll7kp" event={"ID":"078e56f6-dfd4-4ca7-81c4-2d4a5efd44a3","Type":"ContainerDied","Data":"0a726cd17293d89b645c8c39f36e3a0b7eb4dd43776cf592a9581d3dabfdc5f3"} Jan 29 12:25:31 crc kubenswrapper[4753]: I0129 12:25:31.112723 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/keystone-db-create-ll7kp" Jan 29 12:25:31 crc kubenswrapper[4753]: I0129 12:25:31.112729 4753 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0a726cd17293d89b645c8c39f36e3a0b7eb4dd43776cf592a9581d3dabfdc5f3" Jan 29 12:25:31 crc kubenswrapper[4753]: I0129 12:25:31.114138 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/keystone-dfb1-account-create-update-sz78h" event={"ID":"924e55f9-5adb-46df-88dc-6a3ffcef9fba","Type":"ContainerDied","Data":"61c5817bb41969f09dc5968d7b36275d7a085de8106e935b760c43114388c7ad"} Jan 29 12:25:31 crc kubenswrapper[4753]: I0129 12:25:31.114192 4753 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="61c5817bb41969f09dc5968d7b36275d7a085de8106e935b760c43114388c7ad" Jan 29 12:25:31 crc kubenswrapper[4753]: I0129 12:25:31.114296 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/keystone-dfb1-account-create-update-sz78h" Jan 29 12:25:31 crc kubenswrapper[4753]: I0129 12:25:31.147751 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4rlkh\" (UniqueName: \"kubernetes.io/projected/c0945dd3-02c0-42cd-8a01-5ceba63d4fb7-kube-api-access-4rlkh\") pod \"barbican-operator-index-bjfgl\" (UID: \"c0945dd3-02c0-42cd-8a01-5ceba63d4fb7\") " pod="openstack-operators/barbican-operator-index-bjfgl" Jan 29 12:25:31 crc kubenswrapper[4753]: I0129 12:25:31.169875 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4rlkh\" (UniqueName: \"kubernetes.io/projected/c0945dd3-02c0-42cd-8a01-5ceba63d4fb7-kube-api-access-4rlkh\") pod \"barbican-operator-index-bjfgl\" (UID: \"c0945dd3-02c0-42cd-8a01-5ceba63d4fb7\") " pod="openstack-operators/barbican-operator-index-bjfgl" Jan 29 12:25:31 crc kubenswrapper[4753]: I0129 12:25:31.252321 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/barbican-operator-index-bjfgl" Jan 29 12:25:32 crc kubenswrapper[4753]: I0129 12:25:32.089787 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-index-bjfgl"] Jan 29 12:25:32 crc kubenswrapper[4753]: W0129 12:25:32.097468 4753 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc0945dd3_02c0_42cd_8a01_5ceba63d4fb7.slice/crio-12e9928da6f4b7c2a9513f4938e39e596406720d23e1e007b2005a81ebfececc WatchSource:0}: Error finding container 12e9928da6f4b7c2a9513f4938e39e596406720d23e1e007b2005a81ebfececc: Status 404 returned error can't find the container with id 12e9928da6f4b7c2a9513f4938e39e596406720d23e1e007b2005a81ebfececc Jan 29 12:25:32 crc kubenswrapper[4753]: I0129 12:25:32.122757 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-index-bjfgl" event={"ID":"c0945dd3-02c0-42cd-8a01-5ceba63d4fb7","Type":"ContainerStarted","Data":"12e9928da6f4b7c2a9513f4938e39e596406720d23e1e007b2005a81ebfececc"} Jan 29 12:25:34 crc kubenswrapper[4753]: I0129 12:25:34.212021 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-index-bjfgl" event={"ID":"c0945dd3-02c0-42cd-8a01-5ceba63d4fb7","Type":"ContainerStarted","Data":"a22969bbfa3ce15864f3cf3c75d2bd101d9459960abc7744f77f9aab405e2033"} Jan 29 12:25:34 crc kubenswrapper[4753]: I0129 12:25:34.237705 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-index-bjfgl" podStartSLOduration=2.50026679 podStartE2EDuration="4.237686491s" podCreationTimestamp="2026-01-29 12:25:30 +0000 UTC" firstStartedPulling="2026-01-29 12:25:32.100498426 +0000 UTC m=+1146.352579881" lastFinishedPulling="2026-01-29 12:25:33.837918127 +0000 UTC m=+1148.089999582" observedRunningTime="2026-01-29 12:25:34.235786518 +0000 UTC m=+1148.487867973" watchObservedRunningTime="2026-01-29 12:25:34.237686491 +0000 UTC m=+1148.489767936" Jan 29 12:25:35 crc kubenswrapper[4753]: I0129 12:25:35.721553 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/keystone-db-sync-8ljnq"] Jan 29 12:25:35 crc kubenswrapper[4753]: I0129 12:25:35.723100 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/keystone-db-sync-8ljnq" Jan 29 12:25:35 crc kubenswrapper[4753]: I0129 12:25:35.725633 4753 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"keystone" Jan 29 12:25:35 crc kubenswrapper[4753]: I0129 12:25:35.725653 4753 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"keystone-scripts" Jan 29 12:25:35 crc kubenswrapper[4753]: I0129 12:25:35.725791 4753 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"keystone-config-data" Jan 29 12:25:35 crc kubenswrapper[4753]: I0129 12:25:35.726292 4753 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"keystone-keystone-dockercfg-6lmn4" Jan 29 12:25:35 crc kubenswrapper[4753]: I0129 12:25:35.734285 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/keystone-db-sync-8ljnq"] Jan 29 12:25:35 crc kubenswrapper[4753]: I0129 12:25:35.920135 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g8hh5\" (UniqueName: \"kubernetes.io/projected/983c251f-33e5-42b4-a00e-5fc29467892d-kube-api-access-g8hh5\") pod \"keystone-db-sync-8ljnq\" (UID: \"983c251f-33e5-42b4-a00e-5fc29467892d\") " pod="swift-kuttl-tests/keystone-db-sync-8ljnq" Jan 29 12:25:35 crc kubenswrapper[4753]: I0129 12:25:35.920578 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/983c251f-33e5-42b4-a00e-5fc29467892d-config-data\") pod \"keystone-db-sync-8ljnq\" (UID: \"983c251f-33e5-42b4-a00e-5fc29467892d\") " pod="swift-kuttl-tests/keystone-db-sync-8ljnq" Jan 29 12:25:36 crc kubenswrapper[4753]: I0129 12:25:36.022166 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/983c251f-33e5-42b4-a00e-5fc29467892d-config-data\") pod \"keystone-db-sync-8ljnq\" (UID: \"983c251f-33e5-42b4-a00e-5fc29467892d\") " pod="swift-kuttl-tests/keystone-db-sync-8ljnq" Jan 29 12:25:36 crc kubenswrapper[4753]: I0129 12:25:36.022353 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g8hh5\" (UniqueName: \"kubernetes.io/projected/983c251f-33e5-42b4-a00e-5fc29467892d-kube-api-access-g8hh5\") pod \"keystone-db-sync-8ljnq\" (UID: \"983c251f-33e5-42b4-a00e-5fc29467892d\") " pod="swift-kuttl-tests/keystone-db-sync-8ljnq" Jan 29 12:25:36 crc kubenswrapper[4753]: I0129 12:25:36.030412 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/983c251f-33e5-42b4-a00e-5fc29467892d-config-data\") pod \"keystone-db-sync-8ljnq\" (UID: \"983c251f-33e5-42b4-a00e-5fc29467892d\") " pod="swift-kuttl-tests/keystone-db-sync-8ljnq" Jan 29 12:25:36 crc kubenswrapper[4753]: I0129 12:25:36.039271 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g8hh5\" (UniqueName: \"kubernetes.io/projected/983c251f-33e5-42b4-a00e-5fc29467892d-kube-api-access-g8hh5\") pod \"keystone-db-sync-8ljnq\" (UID: \"983c251f-33e5-42b4-a00e-5fc29467892d\") " pod="swift-kuttl-tests/keystone-db-sync-8ljnq" Jan 29 12:25:36 crc kubenswrapper[4753]: I0129 12:25:36.041381 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/keystone-db-sync-8ljnq" Jan 29 12:25:36 crc kubenswrapper[4753]: I0129 12:25:36.545321 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/keystone-db-sync-8ljnq"] Jan 29 12:25:37 crc kubenswrapper[4753]: I0129 12:25:37.727530 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/keystone-db-sync-8ljnq" event={"ID":"983c251f-33e5-42b4-a00e-5fc29467892d","Type":"ContainerStarted","Data":"46238517731dd62b638c6af2abcea788e0e9a565303e99bd162f7e9bab28e1cd"} Jan 29 12:25:41 crc kubenswrapper[4753]: I0129 12:25:41.271294 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/barbican-operator-index-bjfgl" Jan 29 12:25:41 crc kubenswrapper[4753]: I0129 12:25:41.273068 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-index-bjfgl" Jan 29 12:25:41 crc kubenswrapper[4753]: I0129 12:25:41.324281 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/barbican-operator-index-bjfgl" Jan 29 12:25:41 crc kubenswrapper[4753]: I0129 12:25:41.783356 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-index-bjfgl" Jan 29 12:25:44 crc kubenswrapper[4753]: I0129 12:25:44.980096 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/68da353acb70414e75cc7da1a6ad4f36343c2ed918d0517bd68193b1b3rcxvz"] Jan 29 12:25:44 crc kubenswrapper[4753]: I0129 12:25:44.983048 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/68da353acb70414e75cc7da1a6ad4f36343c2ed918d0517bd68193b1b3rcxvz" Jan 29 12:25:44 crc kubenswrapper[4753]: I0129 12:25:44.990478 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/68da353acb70414e75cc7da1a6ad4f36343c2ed918d0517bd68193b1b3rcxvz"] Jan 29 12:25:44 crc kubenswrapper[4753]: I0129 12:25:44.999067 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-j2jmz" Jan 29 12:25:45 crc kubenswrapper[4753]: I0129 12:25:45.095122 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9651a675-c52e-44a3-8f8f-2381bfbefba7-bundle\") pod \"68da353acb70414e75cc7da1a6ad4f36343c2ed918d0517bd68193b1b3rcxvz\" (UID: \"9651a675-c52e-44a3-8f8f-2381bfbefba7\") " pod="openstack-operators/68da353acb70414e75cc7da1a6ad4f36343c2ed918d0517bd68193b1b3rcxvz" Jan 29 12:25:45 crc kubenswrapper[4753]: I0129 12:25:45.095182 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dq46l\" (UniqueName: \"kubernetes.io/projected/9651a675-c52e-44a3-8f8f-2381bfbefba7-kube-api-access-dq46l\") pod \"68da353acb70414e75cc7da1a6ad4f36343c2ed918d0517bd68193b1b3rcxvz\" (UID: \"9651a675-c52e-44a3-8f8f-2381bfbefba7\") " pod="openstack-operators/68da353acb70414e75cc7da1a6ad4f36343c2ed918d0517bd68193b1b3rcxvz" Jan 29 12:25:45 crc kubenswrapper[4753]: I0129 12:25:45.095312 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9651a675-c52e-44a3-8f8f-2381bfbefba7-util\") pod \"68da353acb70414e75cc7da1a6ad4f36343c2ed918d0517bd68193b1b3rcxvz\" (UID: \"9651a675-c52e-44a3-8f8f-2381bfbefba7\") " 
pod="openstack-operators/68da353acb70414e75cc7da1a6ad4f36343c2ed918d0517bd68193b1b3rcxvz" Jan 29 12:25:45 crc kubenswrapper[4753]: I0129 12:25:45.196305 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9651a675-c52e-44a3-8f8f-2381bfbefba7-bundle\") pod \"68da353acb70414e75cc7da1a6ad4f36343c2ed918d0517bd68193b1b3rcxvz\" (UID: \"9651a675-c52e-44a3-8f8f-2381bfbefba7\") " pod="openstack-operators/68da353acb70414e75cc7da1a6ad4f36343c2ed918d0517bd68193b1b3rcxvz" Jan 29 12:25:45 crc kubenswrapper[4753]: I0129 12:25:45.196486 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dq46l\" (UniqueName: \"kubernetes.io/projected/9651a675-c52e-44a3-8f8f-2381bfbefba7-kube-api-access-dq46l\") pod \"68da353acb70414e75cc7da1a6ad4f36343c2ed918d0517bd68193b1b3rcxvz\" (UID: \"9651a675-c52e-44a3-8f8f-2381bfbefba7\") " pod="openstack-operators/68da353acb70414e75cc7da1a6ad4f36343c2ed918d0517bd68193b1b3rcxvz" Jan 29 12:25:45 crc kubenswrapper[4753]: I0129 12:25:45.196567 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9651a675-c52e-44a3-8f8f-2381bfbefba7-util\") pod \"68da353acb70414e75cc7da1a6ad4f36343c2ed918d0517bd68193b1b3rcxvz\" (UID: \"9651a675-c52e-44a3-8f8f-2381bfbefba7\") " pod="openstack-operators/68da353acb70414e75cc7da1a6ad4f36343c2ed918d0517bd68193b1b3rcxvz" Jan 29 12:25:45 crc kubenswrapper[4753]: I0129 12:25:45.197115 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9651a675-c52e-44a3-8f8f-2381bfbefba7-bundle\") pod \"68da353acb70414e75cc7da1a6ad4f36343c2ed918d0517bd68193b1b3rcxvz\" (UID: \"9651a675-c52e-44a3-8f8f-2381bfbefba7\") " pod="openstack-operators/68da353acb70414e75cc7da1a6ad4f36343c2ed918d0517bd68193b1b3rcxvz" Jan 29 12:25:45 crc kubenswrapper[4753]: I0129 12:25:45.197394 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9651a675-c52e-44a3-8f8f-2381bfbefba7-util\") pod \"68da353acb70414e75cc7da1a6ad4f36343c2ed918d0517bd68193b1b3rcxvz\" (UID: \"9651a675-c52e-44a3-8f8f-2381bfbefba7\") " pod="openstack-operators/68da353acb70414e75cc7da1a6ad4f36343c2ed918d0517bd68193b1b3rcxvz" Jan 29 12:25:45 crc kubenswrapper[4753]: I0129 12:25:45.218460 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dq46l\" (UniqueName: \"kubernetes.io/projected/9651a675-c52e-44a3-8f8f-2381bfbefba7-kube-api-access-dq46l\") pod \"68da353acb70414e75cc7da1a6ad4f36343c2ed918d0517bd68193b1b3rcxvz\" (UID: \"9651a675-c52e-44a3-8f8f-2381bfbefba7\") " pod="openstack-operators/68da353acb70414e75cc7da1a6ad4f36343c2ed918d0517bd68193b1b3rcxvz" Jan 29 12:25:45 crc kubenswrapper[4753]: I0129 12:25:45.326562 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/68da353acb70414e75cc7da1a6ad4f36343c2ed918d0517bd68193b1b3rcxvz" Jan 29 12:25:45 crc kubenswrapper[4753]: I0129 12:25:45.750195 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/68da353acb70414e75cc7da1a6ad4f36343c2ed918d0517bd68193b1b3rcxvz"] Jan 29 12:25:45 crc kubenswrapper[4753]: I0129 12:25:45.787521 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/68da353acb70414e75cc7da1a6ad4f36343c2ed918d0517bd68193b1b3rcxvz" event={"ID":"9651a675-c52e-44a3-8f8f-2381bfbefba7","Type":"ContainerStarted","Data":"1f3fb7e2ddbe9f72cf79a1e53eecfc1c54b8bafe0e8f2aef713673ce9012d720"} Jan 29 12:25:45 crc kubenswrapper[4753]: I0129 12:25:45.789695 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/keystone-db-sync-8ljnq" event={"ID":"983c251f-33e5-42b4-a00e-5fc29467892d","Type":"ContainerStarted","Data":"5b75cff6b50c241634eb144f1655b8507425989ac137c4fa0c24e2364433fd8a"} Jan 29 12:25:45 crc kubenswrapper[4753]: I0129 12:25:45.807647 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="swift-kuttl-tests/keystone-db-sync-8ljnq" podStartSLOduration=2.29620026 podStartE2EDuration="10.807559154s" podCreationTimestamp="2026-01-29 12:25:35 +0000 UTC" firstStartedPulling="2026-01-29 12:25:36.5565865 +0000 UTC m=+1150.808667955" lastFinishedPulling="2026-01-29 12:25:45.067945394 +0000 UTC m=+1159.320026849" observedRunningTime="2026-01-29 12:25:45.806571316 +0000 UTC m=+1160.058652781" watchObservedRunningTime="2026-01-29 12:25:45.807559154 +0000 UTC m=+1160.059640649" Jan 29 12:25:46 crc kubenswrapper[4753]: I0129 12:25:46.797723 4753 generic.go:334] "Generic (PLEG): container finished" podID="9651a675-c52e-44a3-8f8f-2381bfbefba7" containerID="3a699a14372b8c8a305238e873dc0a0ada7481e1bf3b1ebe5d2c30194c5dc25a" exitCode=0 Jan 29 12:25:46 crc kubenswrapper[4753]: I0129 12:25:46.797811 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/68da353acb70414e75cc7da1a6ad4f36343c2ed918d0517bd68193b1b3rcxvz" event={"ID":"9651a675-c52e-44a3-8f8f-2381bfbefba7","Type":"ContainerDied","Data":"3a699a14372b8c8a305238e873dc0a0ada7481e1bf3b1ebe5d2c30194c5dc25a"} Jan 29 12:25:48 crc kubenswrapper[4753]: I0129 12:25:48.894971 4753 generic.go:334] "Generic (PLEG): container finished" podID="9651a675-c52e-44a3-8f8f-2381bfbefba7" containerID="01bece6467d47e7482bfb93cf7ed09c408d0f3d2be5fc94c1777450152ea7b02" exitCode=0 Jan 29 12:25:48 crc kubenswrapper[4753]: I0129 12:25:48.895312 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/68da353acb70414e75cc7da1a6ad4f36343c2ed918d0517bd68193b1b3rcxvz" event={"ID":"9651a675-c52e-44a3-8f8f-2381bfbefba7","Type":"ContainerDied","Data":"01bece6467d47e7482bfb93cf7ed09c408d0f3d2be5fc94c1777450152ea7b02"} Jan 29 12:25:49 crc kubenswrapper[4753]: I0129 12:25:49.903012 4753 generic.go:334] "Generic (PLEG): container finished" podID="9651a675-c52e-44a3-8f8f-2381bfbefba7" containerID="9493a081b6664e80a4fc55f550dc410692f87db4482ecc8ff21e5f596171479b" exitCode=0 Jan 29 12:25:49 crc kubenswrapper[4753]: I0129 12:25:49.903085 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/68da353acb70414e75cc7da1a6ad4f36343c2ed918d0517bd68193b1b3rcxvz" event={"ID":"9651a675-c52e-44a3-8f8f-2381bfbefba7","Type":"ContainerDied","Data":"9493a081b6664e80a4fc55f550dc410692f87db4482ecc8ff21e5f596171479b"} Jan 29 12:25:50 crc kubenswrapper[4753]: I0129 12:25:50.913572 4753 generic.go:334] 
"Generic (PLEG): container finished" podID="983c251f-33e5-42b4-a00e-5fc29467892d" containerID="5b75cff6b50c241634eb144f1655b8507425989ac137c4fa0c24e2364433fd8a" exitCode=0 Jan 29 12:25:50 crc kubenswrapper[4753]: I0129 12:25:50.913716 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/keystone-db-sync-8ljnq" event={"ID":"983c251f-33e5-42b4-a00e-5fc29467892d","Type":"ContainerDied","Data":"5b75cff6b50c241634eb144f1655b8507425989ac137c4fa0c24e2364433fd8a"} Jan 29 12:25:51 crc kubenswrapper[4753]: I0129 12:25:51.336044 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/68da353acb70414e75cc7da1a6ad4f36343c2ed918d0517bd68193b1b3rcxvz" Jan 29 12:25:51 crc kubenswrapper[4753]: I0129 12:25:51.503893 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9651a675-c52e-44a3-8f8f-2381bfbefba7-util\") pod \"9651a675-c52e-44a3-8f8f-2381bfbefba7\" (UID: \"9651a675-c52e-44a3-8f8f-2381bfbefba7\") " Jan 29 12:25:51 crc kubenswrapper[4753]: I0129 12:25:51.504129 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dq46l\" (UniqueName: \"kubernetes.io/projected/9651a675-c52e-44a3-8f8f-2381bfbefba7-kube-api-access-dq46l\") pod \"9651a675-c52e-44a3-8f8f-2381bfbefba7\" (UID: \"9651a675-c52e-44a3-8f8f-2381bfbefba7\") " Jan 29 12:25:51 crc kubenswrapper[4753]: I0129 12:25:51.504289 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9651a675-c52e-44a3-8f8f-2381bfbefba7-bundle\") pod \"9651a675-c52e-44a3-8f8f-2381bfbefba7\" (UID: \"9651a675-c52e-44a3-8f8f-2381bfbefba7\") " Jan 29 12:25:51 crc kubenswrapper[4753]: I0129 12:25:51.507495 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9651a675-c52e-44a3-8f8f-2381bfbefba7-bundle" (OuterVolumeSpecName: "bundle") pod "9651a675-c52e-44a3-8f8f-2381bfbefba7" (UID: "9651a675-c52e-44a3-8f8f-2381bfbefba7"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:25:51 crc kubenswrapper[4753]: I0129 12:25:51.529467 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9651a675-c52e-44a3-8f8f-2381bfbefba7-kube-api-access-dq46l" (OuterVolumeSpecName: "kube-api-access-dq46l") pod "9651a675-c52e-44a3-8f8f-2381bfbefba7" (UID: "9651a675-c52e-44a3-8f8f-2381bfbefba7"). InnerVolumeSpecName "kube-api-access-dq46l". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:25:51 crc kubenswrapper[4753]: I0129 12:25:51.595493 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9651a675-c52e-44a3-8f8f-2381bfbefba7-util" (OuterVolumeSpecName: "util") pod "9651a675-c52e-44a3-8f8f-2381bfbefba7" (UID: "9651a675-c52e-44a3-8f8f-2381bfbefba7"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:25:51 crc kubenswrapper[4753]: I0129 12:25:51.605748 4753 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9651a675-c52e-44a3-8f8f-2381bfbefba7-util\") on node \"crc\" DevicePath \"\"" Jan 29 12:25:51 crc kubenswrapper[4753]: I0129 12:25:51.605796 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dq46l\" (UniqueName: \"kubernetes.io/projected/9651a675-c52e-44a3-8f8f-2381bfbefba7-kube-api-access-dq46l\") on node \"crc\" DevicePath \"\"" Jan 29 12:25:51 crc kubenswrapper[4753]: I0129 12:25:51.605820 4753 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9651a675-c52e-44a3-8f8f-2381bfbefba7-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:25:51 crc kubenswrapper[4753]: I0129 12:25:51.928525 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/68da353acb70414e75cc7da1a6ad4f36343c2ed918d0517bd68193b1b3rcxvz" Jan 29 12:25:51 crc kubenswrapper[4753]: I0129 12:25:51.928531 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/68da353acb70414e75cc7da1a6ad4f36343c2ed918d0517bd68193b1b3rcxvz" event={"ID":"9651a675-c52e-44a3-8f8f-2381bfbefba7","Type":"ContainerDied","Data":"1f3fb7e2ddbe9f72cf79a1e53eecfc1c54b8bafe0e8f2aef713673ce9012d720"} Jan 29 12:25:51 crc kubenswrapper[4753]: I0129 12:25:51.930022 4753 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1f3fb7e2ddbe9f72cf79a1e53eecfc1c54b8bafe0e8f2aef713673ce9012d720" Jan 29 12:25:52 crc kubenswrapper[4753]: I0129 12:25:52.197684 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/keystone-db-sync-8ljnq" Jan 29 12:25:52 crc kubenswrapper[4753]: I0129 12:25:52.219341 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g8hh5\" (UniqueName: \"kubernetes.io/projected/983c251f-33e5-42b4-a00e-5fc29467892d-kube-api-access-g8hh5\") pod \"983c251f-33e5-42b4-a00e-5fc29467892d\" (UID: \"983c251f-33e5-42b4-a00e-5fc29467892d\") " Jan 29 12:25:52 crc kubenswrapper[4753]: I0129 12:25:52.219379 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/983c251f-33e5-42b4-a00e-5fc29467892d-config-data\") pod \"983c251f-33e5-42b4-a00e-5fc29467892d\" (UID: \"983c251f-33e5-42b4-a00e-5fc29467892d\") " Jan 29 12:25:52 crc kubenswrapper[4753]: I0129 12:25:52.225316 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/983c251f-33e5-42b4-a00e-5fc29467892d-kube-api-access-g8hh5" (OuterVolumeSpecName: "kube-api-access-g8hh5") pod "983c251f-33e5-42b4-a00e-5fc29467892d" (UID: "983c251f-33e5-42b4-a00e-5fc29467892d"). InnerVolumeSpecName "kube-api-access-g8hh5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:25:52 crc kubenswrapper[4753]: I0129 12:25:52.249533 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/983c251f-33e5-42b4-a00e-5fc29467892d-config-data" (OuterVolumeSpecName: "config-data") pod "983c251f-33e5-42b4-a00e-5fc29467892d" (UID: "983c251f-33e5-42b4-a00e-5fc29467892d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:25:52 crc kubenswrapper[4753]: I0129 12:25:52.320833 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g8hh5\" (UniqueName: \"kubernetes.io/projected/983c251f-33e5-42b4-a00e-5fc29467892d-kube-api-access-g8hh5\") on node \"crc\" DevicePath \"\"" Jan 29 12:25:52 crc kubenswrapper[4753]: I0129 12:25:52.320872 4753 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/983c251f-33e5-42b4-a00e-5fc29467892d-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 12:25:52 crc kubenswrapper[4753]: I0129 12:25:52.936539 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/keystone-db-sync-8ljnq" event={"ID":"983c251f-33e5-42b4-a00e-5fc29467892d","Type":"ContainerDied","Data":"46238517731dd62b638c6af2abcea788e0e9a565303e99bd162f7e9bab28e1cd"} Jan 29 12:25:52 crc kubenswrapper[4753]: I0129 12:25:52.936835 4753 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="46238517731dd62b638c6af2abcea788e0e9a565303e99bd162f7e9bab28e1cd" Jan 29 12:25:52 crc kubenswrapper[4753]: I0129 12:25:52.936605 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/keystone-db-sync-8ljnq" Jan 29 12:25:53 crc kubenswrapper[4753]: I0129 12:25:53.138647 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/keystone-bootstrap-fgpnh"] Jan 29 12:25:53 crc kubenswrapper[4753]: E0129 12:25:53.138974 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9651a675-c52e-44a3-8f8f-2381bfbefba7" containerName="pull" Jan 29 12:25:53 crc kubenswrapper[4753]: I0129 12:25:53.139014 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="9651a675-c52e-44a3-8f8f-2381bfbefba7" containerName="pull" Jan 29 12:25:53 crc kubenswrapper[4753]: E0129 12:25:53.139028 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9651a675-c52e-44a3-8f8f-2381bfbefba7" containerName="util" Jan 29 12:25:53 crc kubenswrapper[4753]: I0129 12:25:53.139034 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="9651a675-c52e-44a3-8f8f-2381bfbefba7" containerName="util" Jan 29 12:25:53 crc kubenswrapper[4753]: E0129 12:25:53.139042 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9651a675-c52e-44a3-8f8f-2381bfbefba7" containerName="extract" Jan 29 12:25:53 crc kubenswrapper[4753]: I0129 12:25:53.139048 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="9651a675-c52e-44a3-8f8f-2381bfbefba7" containerName="extract" Jan 29 12:25:53 crc kubenswrapper[4753]: E0129 12:25:53.139079 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="983c251f-33e5-42b4-a00e-5fc29467892d" containerName="keystone-db-sync" Jan 29 12:25:53 crc kubenswrapper[4753]: I0129 12:25:53.139085 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="983c251f-33e5-42b4-a00e-5fc29467892d" containerName="keystone-db-sync" Jan 29 12:25:53 crc kubenswrapper[4753]: I0129 12:25:53.140344 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="983c251f-33e5-42b4-a00e-5fc29467892d" containerName="keystone-db-sync" Jan 29 12:25:53 crc kubenswrapper[4753]: I0129 12:25:53.140472 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="9651a675-c52e-44a3-8f8f-2381bfbefba7" containerName="extract" Jan 29 12:25:53 crc kubenswrapper[4753]: I0129 12:25:53.141304 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/keystone-bootstrap-fgpnh" Jan 29 12:25:53 crc kubenswrapper[4753]: I0129 12:25:53.144214 4753 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"keystone-keystone-dockercfg-6lmn4" Jan 29 12:25:53 crc kubenswrapper[4753]: I0129 12:25:53.144537 4753 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"keystone" Jan 29 12:25:53 crc kubenswrapper[4753]: I0129 12:25:53.144793 4753 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"keystone-scripts" Jan 29 12:25:53 crc kubenswrapper[4753]: I0129 12:25:53.145095 4753 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"osp-secret" Jan 29 12:25:53 crc kubenswrapper[4753]: I0129 12:25:53.145392 4753 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"keystone-config-data" Jan 29 12:25:53 crc kubenswrapper[4753]: I0129 12:25:53.154647 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/keystone-bootstrap-fgpnh"] Jan 29 12:25:53 crc kubenswrapper[4753]: I0129 12:25:53.236009 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/404ab5c7-422b-4264-baf7-64ab2857eb4b-credential-keys\") pod \"keystone-bootstrap-fgpnh\" (UID: \"404ab5c7-422b-4264-baf7-64ab2857eb4b\") " pod="swift-kuttl-tests/keystone-bootstrap-fgpnh" Jan 29 12:25:53 crc kubenswrapper[4753]: I0129 12:25:53.236096 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/404ab5c7-422b-4264-baf7-64ab2857eb4b-fernet-keys\") pod \"keystone-bootstrap-fgpnh\" (UID: \"404ab5c7-422b-4264-baf7-64ab2857eb4b\") " pod="swift-kuttl-tests/keystone-bootstrap-fgpnh" Jan 29 12:25:53 crc kubenswrapper[4753]: I0129 12:25:53.236126 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h66g6\" (UniqueName: \"kubernetes.io/projected/404ab5c7-422b-4264-baf7-64ab2857eb4b-kube-api-access-h66g6\") pod \"keystone-bootstrap-fgpnh\" (UID: \"404ab5c7-422b-4264-baf7-64ab2857eb4b\") " pod="swift-kuttl-tests/keystone-bootstrap-fgpnh" Jan 29 12:25:53 crc kubenswrapper[4753]: I0129 12:25:53.236168 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/404ab5c7-422b-4264-baf7-64ab2857eb4b-scripts\") pod \"keystone-bootstrap-fgpnh\" (UID: \"404ab5c7-422b-4264-baf7-64ab2857eb4b\") " pod="swift-kuttl-tests/keystone-bootstrap-fgpnh" Jan 29 12:25:53 crc kubenswrapper[4753]: I0129 12:25:53.236210 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/404ab5c7-422b-4264-baf7-64ab2857eb4b-config-data\") pod \"keystone-bootstrap-fgpnh\" (UID: \"404ab5c7-422b-4264-baf7-64ab2857eb4b\") " pod="swift-kuttl-tests/keystone-bootstrap-fgpnh" Jan 29 12:25:53 crc kubenswrapper[4753]: I0129 12:25:53.337039 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/404ab5c7-422b-4264-baf7-64ab2857eb4b-config-data\") pod \"keystone-bootstrap-fgpnh\" (UID: \"404ab5c7-422b-4264-baf7-64ab2857eb4b\") " pod="swift-kuttl-tests/keystone-bootstrap-fgpnh" Jan 29 12:25:53 crc kubenswrapper[4753]: I0129 12:25:53.337124 
4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/404ab5c7-422b-4264-baf7-64ab2857eb4b-credential-keys\") pod \"keystone-bootstrap-fgpnh\" (UID: \"404ab5c7-422b-4264-baf7-64ab2857eb4b\") " pod="swift-kuttl-tests/keystone-bootstrap-fgpnh" Jan 29 12:25:53 crc kubenswrapper[4753]: I0129 12:25:53.337164 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/404ab5c7-422b-4264-baf7-64ab2857eb4b-fernet-keys\") pod \"keystone-bootstrap-fgpnh\" (UID: \"404ab5c7-422b-4264-baf7-64ab2857eb4b\") " pod="swift-kuttl-tests/keystone-bootstrap-fgpnh" Jan 29 12:25:53 crc kubenswrapper[4753]: I0129 12:25:53.337189 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h66g6\" (UniqueName: \"kubernetes.io/projected/404ab5c7-422b-4264-baf7-64ab2857eb4b-kube-api-access-h66g6\") pod \"keystone-bootstrap-fgpnh\" (UID: \"404ab5c7-422b-4264-baf7-64ab2857eb4b\") " pod="swift-kuttl-tests/keystone-bootstrap-fgpnh" Jan 29 12:25:53 crc kubenswrapper[4753]: I0129 12:25:53.337218 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/404ab5c7-422b-4264-baf7-64ab2857eb4b-scripts\") pod \"keystone-bootstrap-fgpnh\" (UID: \"404ab5c7-422b-4264-baf7-64ab2857eb4b\") " pod="swift-kuttl-tests/keystone-bootstrap-fgpnh" Jan 29 12:25:53 crc kubenswrapper[4753]: I0129 12:25:53.341463 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/404ab5c7-422b-4264-baf7-64ab2857eb4b-scripts\") pod \"keystone-bootstrap-fgpnh\" (UID: \"404ab5c7-422b-4264-baf7-64ab2857eb4b\") " pod="swift-kuttl-tests/keystone-bootstrap-fgpnh" Jan 29 12:25:53 crc kubenswrapper[4753]: I0129 12:25:53.341532 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/404ab5c7-422b-4264-baf7-64ab2857eb4b-config-data\") pod \"keystone-bootstrap-fgpnh\" (UID: \"404ab5c7-422b-4264-baf7-64ab2857eb4b\") " pod="swift-kuttl-tests/keystone-bootstrap-fgpnh" Jan 29 12:25:53 crc kubenswrapper[4753]: I0129 12:25:53.341578 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/404ab5c7-422b-4264-baf7-64ab2857eb4b-fernet-keys\") pod \"keystone-bootstrap-fgpnh\" (UID: \"404ab5c7-422b-4264-baf7-64ab2857eb4b\") " pod="swift-kuttl-tests/keystone-bootstrap-fgpnh" Jan 29 12:25:53 crc kubenswrapper[4753]: I0129 12:25:53.352295 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/404ab5c7-422b-4264-baf7-64ab2857eb4b-credential-keys\") pod \"keystone-bootstrap-fgpnh\" (UID: \"404ab5c7-422b-4264-baf7-64ab2857eb4b\") " pod="swift-kuttl-tests/keystone-bootstrap-fgpnh" Jan 29 12:25:53 crc kubenswrapper[4753]: I0129 12:25:53.355414 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h66g6\" (UniqueName: \"kubernetes.io/projected/404ab5c7-422b-4264-baf7-64ab2857eb4b-kube-api-access-h66g6\") pod \"keystone-bootstrap-fgpnh\" (UID: \"404ab5c7-422b-4264-baf7-64ab2857eb4b\") " pod="swift-kuttl-tests/keystone-bootstrap-fgpnh" Jan 29 12:25:53 crc kubenswrapper[4753]: I0129 12:25:53.463161 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/keystone-bootstrap-fgpnh" Jan 29 12:25:54 crc kubenswrapper[4753]: I0129 12:25:54.004448 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/keystone-bootstrap-fgpnh"] Jan 29 12:25:54 crc kubenswrapper[4753]: I0129 12:25:54.953647 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/keystone-bootstrap-fgpnh" event={"ID":"404ab5c7-422b-4264-baf7-64ab2857eb4b","Type":"ContainerStarted","Data":"206092a3b35a5a4ec282d55dd57d178ea88f61a31810e16320d600723b4fbc46"} Jan 29 12:25:54 crc kubenswrapper[4753]: I0129 12:25:54.953974 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/keystone-bootstrap-fgpnh" event={"ID":"404ab5c7-422b-4264-baf7-64ab2857eb4b","Type":"ContainerStarted","Data":"2ea78adaa4168248e11fdc15df1fcf8bebfa5624c1d72c72ce255e5730cf2474"} Jan 29 12:25:54 crc kubenswrapper[4753]: I0129 12:25:54.982326 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="swift-kuttl-tests/keystone-bootstrap-fgpnh" podStartSLOduration=1.982287372 podStartE2EDuration="1.982287372s" podCreationTimestamp="2026-01-29 12:25:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:25:54.980645016 +0000 UTC m=+1169.232726481" watchObservedRunningTime="2026-01-29 12:25:54.982287372 +0000 UTC m=+1169.234368827" Jan 29 12:25:57 crc kubenswrapper[4753]: I0129 12:25:57.976738 4753 generic.go:334] "Generic (PLEG): container finished" podID="404ab5c7-422b-4264-baf7-64ab2857eb4b" containerID="206092a3b35a5a4ec282d55dd57d178ea88f61a31810e16320d600723b4fbc46" exitCode=0 Jan 29 12:25:57 crc kubenswrapper[4753]: I0129 12:25:57.976839 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/keystone-bootstrap-fgpnh" event={"ID":"404ab5c7-422b-4264-baf7-64ab2857eb4b","Type":"ContainerDied","Data":"206092a3b35a5a4ec282d55dd57d178ea88f61a31810e16320d600723b4fbc46"} Jan 29 12:25:59 crc kubenswrapper[4753]: I0129 12:25:59.252582 4753 patch_prober.go:28] interesting pod/machine-config-daemon-7c24x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 12:25:59 crc kubenswrapper[4753]: I0129 12:25:59.252915 4753 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 12:25:59 crc kubenswrapper[4753]: I0129 12:25:59.323573 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/keystone-bootstrap-fgpnh" Jan 29 12:25:59 crc kubenswrapper[4753]: I0129 12:25:59.523060 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/404ab5c7-422b-4264-baf7-64ab2857eb4b-fernet-keys\") pod \"404ab5c7-422b-4264-baf7-64ab2857eb4b\" (UID: \"404ab5c7-422b-4264-baf7-64ab2857eb4b\") " Jan 29 12:25:59 crc kubenswrapper[4753]: I0129 12:25:59.523344 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/404ab5c7-422b-4264-baf7-64ab2857eb4b-scripts\") pod \"404ab5c7-422b-4264-baf7-64ab2857eb4b\" (UID: \"404ab5c7-422b-4264-baf7-64ab2857eb4b\") " Jan 29 12:25:59 crc kubenswrapper[4753]: I0129 12:25:59.523551 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h66g6\" (UniqueName: \"kubernetes.io/projected/404ab5c7-422b-4264-baf7-64ab2857eb4b-kube-api-access-h66g6\") pod \"404ab5c7-422b-4264-baf7-64ab2857eb4b\" (UID: \"404ab5c7-422b-4264-baf7-64ab2857eb4b\") " Jan 29 12:25:59 crc kubenswrapper[4753]: I0129 12:25:59.523686 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/404ab5c7-422b-4264-baf7-64ab2857eb4b-config-data\") pod \"404ab5c7-422b-4264-baf7-64ab2857eb4b\" (UID: \"404ab5c7-422b-4264-baf7-64ab2857eb4b\") " Jan 29 12:25:59 crc kubenswrapper[4753]: I0129 12:25:59.523836 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/404ab5c7-422b-4264-baf7-64ab2857eb4b-credential-keys\") pod \"404ab5c7-422b-4264-baf7-64ab2857eb4b\" (UID: \"404ab5c7-422b-4264-baf7-64ab2857eb4b\") " Jan 29 12:25:59 crc kubenswrapper[4753]: I0129 12:25:59.529790 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/404ab5c7-422b-4264-baf7-64ab2857eb4b-scripts" (OuterVolumeSpecName: "scripts") pod "404ab5c7-422b-4264-baf7-64ab2857eb4b" (UID: "404ab5c7-422b-4264-baf7-64ab2857eb4b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:25:59 crc kubenswrapper[4753]: I0129 12:25:59.530422 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/404ab5c7-422b-4264-baf7-64ab2857eb4b-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "404ab5c7-422b-4264-baf7-64ab2857eb4b" (UID: "404ab5c7-422b-4264-baf7-64ab2857eb4b"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:25:59 crc kubenswrapper[4753]: I0129 12:25:59.530496 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/404ab5c7-422b-4264-baf7-64ab2857eb4b-kube-api-access-h66g6" (OuterVolumeSpecName: "kube-api-access-h66g6") pod "404ab5c7-422b-4264-baf7-64ab2857eb4b" (UID: "404ab5c7-422b-4264-baf7-64ab2857eb4b"). InnerVolumeSpecName "kube-api-access-h66g6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:25:59 crc kubenswrapper[4753]: I0129 12:25:59.532058 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/404ab5c7-422b-4264-baf7-64ab2857eb4b-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "404ab5c7-422b-4264-baf7-64ab2857eb4b" (UID: "404ab5c7-422b-4264-baf7-64ab2857eb4b"). InnerVolumeSpecName "credential-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:25:59 crc kubenswrapper[4753]: I0129 12:25:59.547641 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/404ab5c7-422b-4264-baf7-64ab2857eb4b-config-data" (OuterVolumeSpecName: "config-data") pod "404ab5c7-422b-4264-baf7-64ab2857eb4b" (UID: "404ab5c7-422b-4264-baf7-64ab2857eb4b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:25:59 crc kubenswrapper[4753]: I0129 12:25:59.624628 4753 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/404ab5c7-422b-4264-baf7-64ab2857eb4b-credential-keys\") on node \"crc\" DevicePath \"\"" Jan 29 12:25:59 crc kubenswrapper[4753]: I0129 12:25:59.624662 4753 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/404ab5c7-422b-4264-baf7-64ab2857eb4b-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 29 12:25:59 crc kubenswrapper[4753]: I0129 12:25:59.624674 4753 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/404ab5c7-422b-4264-baf7-64ab2857eb4b-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:25:59 crc kubenswrapper[4753]: I0129 12:25:59.624686 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h66g6\" (UniqueName: \"kubernetes.io/projected/404ab5c7-422b-4264-baf7-64ab2857eb4b-kube-api-access-h66g6\") on node \"crc\" DevicePath \"\"" Jan 29 12:25:59 crc kubenswrapper[4753]: I0129 12:25:59.624696 4753 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/404ab5c7-422b-4264-baf7-64ab2857eb4b-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 12:25:59 crc kubenswrapper[4753]: I0129 12:25:59.993979 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/keystone-bootstrap-fgpnh" event={"ID":"404ab5c7-422b-4264-baf7-64ab2857eb4b","Type":"ContainerDied","Data":"2ea78adaa4168248e11fdc15df1fcf8bebfa5624c1d72c72ce255e5730cf2474"} Jan 29 12:25:59 crc kubenswrapper[4753]: I0129 12:25:59.994032 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/keystone-bootstrap-fgpnh" Jan 29 12:25:59 crc kubenswrapper[4753]: I0129 12:25:59.994043 4753 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2ea78adaa4168248e11fdc15df1fcf8bebfa5624c1d72c72ce255e5730cf2474" Jan 29 12:26:00 crc kubenswrapper[4753]: I0129 12:26:00.114092 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/keystone-66595c964d-6hnff"] Jan 29 12:26:00 crc kubenswrapper[4753]: E0129 12:26:00.114574 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="404ab5c7-422b-4264-baf7-64ab2857eb4b" containerName="keystone-bootstrap" Jan 29 12:26:00 crc kubenswrapper[4753]: I0129 12:26:00.114606 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="404ab5c7-422b-4264-baf7-64ab2857eb4b" containerName="keystone-bootstrap" Jan 29 12:26:00 crc kubenswrapper[4753]: I0129 12:26:00.114742 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="404ab5c7-422b-4264-baf7-64ab2857eb4b" containerName="keystone-bootstrap" Jan 29 12:26:00 crc kubenswrapper[4753]: I0129 12:26:00.115313 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/keystone-66595c964d-6hnff" Jan 29 12:26:00 crc kubenswrapper[4753]: I0129 12:26:00.117981 4753 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"keystone-scripts" Jan 29 12:26:00 crc kubenswrapper[4753]: I0129 12:26:00.118058 4753 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"keystone-config-data" Jan 29 12:26:00 crc kubenswrapper[4753]: I0129 12:26:00.117982 4753 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"keystone-keystone-dockercfg-6lmn4" Jan 29 12:26:00 crc kubenswrapper[4753]: I0129 12:26:00.122737 4753 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"keystone" Jan 29 12:26:00 crc kubenswrapper[4753]: I0129 12:26:00.124398 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/keystone-66595c964d-6hnff"] Jan 29 12:26:00 crc kubenswrapper[4753]: I0129 12:26:00.316219 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d29j4\" (UniqueName: \"kubernetes.io/projected/7832353e-7e00-44af-bd6a-9227ef58fd05-kube-api-access-d29j4\") pod \"keystone-66595c964d-6hnff\" (UID: \"7832353e-7e00-44af-bd6a-9227ef58fd05\") " pod="swift-kuttl-tests/keystone-66595c964d-6hnff" Jan 29 12:26:00 crc kubenswrapper[4753]: I0129 12:26:00.316790 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7832353e-7e00-44af-bd6a-9227ef58fd05-scripts\") pod \"keystone-66595c964d-6hnff\" (UID: \"7832353e-7e00-44af-bd6a-9227ef58fd05\") " pod="swift-kuttl-tests/keystone-66595c964d-6hnff" Jan 29 12:26:00 crc kubenswrapper[4753]: I0129 12:26:00.316823 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7832353e-7e00-44af-bd6a-9227ef58fd05-config-data\") pod \"keystone-66595c964d-6hnff\" (UID: \"7832353e-7e00-44af-bd6a-9227ef58fd05\") " pod="swift-kuttl-tests/keystone-66595c964d-6hnff" Jan 29 12:26:00 crc kubenswrapper[4753]: I0129 12:26:00.316888 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7832353e-7e00-44af-bd6a-9227ef58fd05-fernet-keys\") pod \"keystone-66595c964d-6hnff\" (UID: \"7832353e-7e00-44af-bd6a-9227ef58fd05\") " pod="swift-kuttl-tests/keystone-66595c964d-6hnff" Jan 29 12:26:00 crc kubenswrapper[4753]: I0129 12:26:00.316918 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/7832353e-7e00-44af-bd6a-9227ef58fd05-credential-keys\") pod \"keystone-66595c964d-6hnff\" (UID: \"7832353e-7e00-44af-bd6a-9227ef58fd05\") " pod="swift-kuttl-tests/keystone-66595c964d-6hnff" Jan 29 12:26:00 crc kubenswrapper[4753]: I0129 12:26:00.418443 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d29j4\" (UniqueName: \"kubernetes.io/projected/7832353e-7e00-44af-bd6a-9227ef58fd05-kube-api-access-d29j4\") pod \"keystone-66595c964d-6hnff\" (UID: \"7832353e-7e00-44af-bd6a-9227ef58fd05\") " pod="swift-kuttl-tests/keystone-66595c964d-6hnff" Jan 29 12:26:00 crc kubenswrapper[4753]: I0129 12:26:00.418506 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/7832353e-7e00-44af-bd6a-9227ef58fd05-scripts\") pod \"keystone-66595c964d-6hnff\" (UID: \"7832353e-7e00-44af-bd6a-9227ef58fd05\") " pod="swift-kuttl-tests/keystone-66595c964d-6hnff" Jan 29 12:26:00 crc kubenswrapper[4753]: I0129 12:26:00.418535 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7832353e-7e00-44af-bd6a-9227ef58fd05-config-data\") pod \"keystone-66595c964d-6hnff\" (UID: \"7832353e-7e00-44af-bd6a-9227ef58fd05\") " pod="swift-kuttl-tests/keystone-66595c964d-6hnff" Jan 29 12:26:00 crc kubenswrapper[4753]: I0129 12:26:00.418596 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7832353e-7e00-44af-bd6a-9227ef58fd05-fernet-keys\") pod \"keystone-66595c964d-6hnff\" (UID: \"7832353e-7e00-44af-bd6a-9227ef58fd05\") " pod="swift-kuttl-tests/keystone-66595c964d-6hnff" Jan 29 12:26:00 crc kubenswrapper[4753]: I0129 12:26:00.418628 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/7832353e-7e00-44af-bd6a-9227ef58fd05-credential-keys\") pod \"keystone-66595c964d-6hnff\" (UID: \"7832353e-7e00-44af-bd6a-9227ef58fd05\") " pod="swift-kuttl-tests/keystone-66595c964d-6hnff" Jan 29 12:26:00 crc kubenswrapper[4753]: I0129 12:26:00.423398 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7832353e-7e00-44af-bd6a-9227ef58fd05-scripts\") pod \"keystone-66595c964d-6hnff\" (UID: \"7832353e-7e00-44af-bd6a-9227ef58fd05\") " pod="swift-kuttl-tests/keystone-66595c964d-6hnff" Jan 29 12:26:00 crc kubenswrapper[4753]: I0129 12:26:00.423572 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/7832353e-7e00-44af-bd6a-9227ef58fd05-credential-keys\") pod \"keystone-66595c964d-6hnff\" (UID: \"7832353e-7e00-44af-bd6a-9227ef58fd05\") " pod="swift-kuttl-tests/keystone-66595c964d-6hnff" Jan 29 12:26:00 crc kubenswrapper[4753]: I0129 12:26:00.424100 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7832353e-7e00-44af-bd6a-9227ef58fd05-fernet-keys\") pod \"keystone-66595c964d-6hnff\" (UID: \"7832353e-7e00-44af-bd6a-9227ef58fd05\") " pod="swift-kuttl-tests/keystone-66595c964d-6hnff" Jan 29 12:26:00 crc kubenswrapper[4753]: I0129 12:26:00.431058 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7832353e-7e00-44af-bd6a-9227ef58fd05-config-data\") pod \"keystone-66595c964d-6hnff\" (UID: \"7832353e-7e00-44af-bd6a-9227ef58fd05\") " pod="swift-kuttl-tests/keystone-66595c964d-6hnff" Jan 29 12:26:00 crc kubenswrapper[4753]: I0129 12:26:00.436720 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d29j4\" (UniqueName: \"kubernetes.io/projected/7832353e-7e00-44af-bd6a-9227ef58fd05-kube-api-access-d29j4\") pod \"keystone-66595c964d-6hnff\" (UID: \"7832353e-7e00-44af-bd6a-9227ef58fd05\") " pod="swift-kuttl-tests/keystone-66595c964d-6hnff" Jan 29 12:26:00 crc kubenswrapper[4753]: I0129 12:26:00.459933 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/keystone-66595c964d-6hnff" Jan 29 12:26:00 crc kubenswrapper[4753]: I0129 12:26:00.692846 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/keystone-66595c964d-6hnff"] Jan 29 12:26:01 crc kubenswrapper[4753]: I0129 12:26:01.004309 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/keystone-66595c964d-6hnff" event={"ID":"7832353e-7e00-44af-bd6a-9227ef58fd05","Type":"ContainerStarted","Data":"56e14626896d6f1f29b27a6eed7fc588033ff40f6c37175ca5a67638ead7c7b0"} Jan 29 12:26:01 crc kubenswrapper[4753]: I0129 12:26:01.004697 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/keystone-66595c964d-6hnff" event={"ID":"7832353e-7e00-44af-bd6a-9227ef58fd05","Type":"ContainerStarted","Data":"b3a3769ab58213d819f63108d8a2310fcd2886e1cde5b8b7dd7e7967350ac5d8"} Jan 29 12:26:01 crc kubenswrapper[4753]: I0129 12:26:01.004767 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="swift-kuttl-tests/keystone-66595c964d-6hnff" Jan 29 12:26:01 crc kubenswrapper[4753]: I0129 12:26:01.030161 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="swift-kuttl-tests/keystone-66595c964d-6hnff" podStartSLOduration=1.030102058 podStartE2EDuration="1.030102058s" podCreationTimestamp="2026-01-29 12:26:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:26:01.01899296 +0000 UTC m=+1175.271074425" watchObservedRunningTime="2026-01-29 12:26:01.030102058 +0000 UTC m=+1175.282183523" Jan 29 12:26:03 crc kubenswrapper[4753]: I0129 12:26:03.120157 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-696f9bf98-6q66p"] Jan 29 12:26:03 crc kubenswrapper[4753]: I0129 12:26:03.121437 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-696f9bf98-6q66p" Jan 29 12:26:03 crc kubenswrapper[4753]: I0129 12:26:03.123662 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-service-cert" Jan 29 12:26:03 crc kubenswrapper[4753]: I0129 12:26:03.124160 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-fqgmv" Jan 29 12:26:03 crc kubenswrapper[4753]: I0129 12:26:03.138897 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-696f9bf98-6q66p"] Jan 29 12:26:03 crc kubenswrapper[4753]: I0129 12:26:03.242626 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/3b46211c-99c7-44c9-8275-991e70edba9d-webhook-cert\") pod \"barbican-operator-controller-manager-696f9bf98-6q66p\" (UID: \"3b46211c-99c7-44c9-8275-991e70edba9d\") " pod="openstack-operators/barbican-operator-controller-manager-696f9bf98-6q66p" Jan 29 12:26:03 crc kubenswrapper[4753]: I0129 12:26:03.242695 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/3b46211c-99c7-44c9-8275-991e70edba9d-apiservice-cert\") pod \"barbican-operator-controller-manager-696f9bf98-6q66p\" (UID: \"3b46211c-99c7-44c9-8275-991e70edba9d\") " pod="openstack-operators/barbican-operator-controller-manager-696f9bf98-6q66p" Jan 29 12:26:03 crc kubenswrapper[4753]: I0129 12:26:03.242727 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z54gg\" (UniqueName: \"kubernetes.io/projected/3b46211c-99c7-44c9-8275-991e70edba9d-kube-api-access-z54gg\") pod \"barbican-operator-controller-manager-696f9bf98-6q66p\" (UID: \"3b46211c-99c7-44c9-8275-991e70edba9d\") " pod="openstack-operators/barbican-operator-controller-manager-696f9bf98-6q66p" Jan 29 12:26:03 crc kubenswrapper[4753]: I0129 12:26:03.344997 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/3b46211c-99c7-44c9-8275-991e70edba9d-webhook-cert\") pod \"barbican-operator-controller-manager-696f9bf98-6q66p\" (UID: \"3b46211c-99c7-44c9-8275-991e70edba9d\") " pod="openstack-operators/barbican-operator-controller-manager-696f9bf98-6q66p" Jan 29 12:26:03 crc kubenswrapper[4753]: I0129 12:26:03.345186 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/3b46211c-99c7-44c9-8275-991e70edba9d-apiservice-cert\") pod \"barbican-operator-controller-manager-696f9bf98-6q66p\" (UID: \"3b46211c-99c7-44c9-8275-991e70edba9d\") " pod="openstack-operators/barbican-operator-controller-manager-696f9bf98-6q66p" Jan 29 12:26:03 crc kubenswrapper[4753]: I0129 12:26:03.346529 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z54gg\" (UniqueName: \"kubernetes.io/projected/3b46211c-99c7-44c9-8275-991e70edba9d-kube-api-access-z54gg\") pod \"barbican-operator-controller-manager-696f9bf98-6q66p\" (UID: \"3b46211c-99c7-44c9-8275-991e70edba9d\") " pod="openstack-operators/barbican-operator-controller-manager-696f9bf98-6q66p" Jan 29 12:26:03 crc kubenswrapper[4753]: I0129 12:26:03.352875 4753 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/3b46211c-99c7-44c9-8275-991e70edba9d-webhook-cert\") pod \"barbican-operator-controller-manager-696f9bf98-6q66p\" (UID: \"3b46211c-99c7-44c9-8275-991e70edba9d\") " pod="openstack-operators/barbican-operator-controller-manager-696f9bf98-6q66p" Jan 29 12:26:03 crc kubenswrapper[4753]: I0129 12:26:03.353982 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/3b46211c-99c7-44c9-8275-991e70edba9d-apiservice-cert\") pod \"barbican-operator-controller-manager-696f9bf98-6q66p\" (UID: \"3b46211c-99c7-44c9-8275-991e70edba9d\") " pod="openstack-operators/barbican-operator-controller-manager-696f9bf98-6q66p" Jan 29 12:26:03 crc kubenswrapper[4753]: I0129 12:26:03.369095 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z54gg\" (UniqueName: \"kubernetes.io/projected/3b46211c-99c7-44c9-8275-991e70edba9d-kube-api-access-z54gg\") pod \"barbican-operator-controller-manager-696f9bf98-6q66p\" (UID: \"3b46211c-99c7-44c9-8275-991e70edba9d\") " pod="openstack-operators/barbican-operator-controller-manager-696f9bf98-6q66p" Jan 29 12:26:03 crc kubenswrapper[4753]: I0129 12:26:03.447419 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-696f9bf98-6q66p" Jan 29 12:26:03 crc kubenswrapper[4753]: I0129 12:26:03.986765 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-696f9bf98-6q66p"] Jan 29 12:26:04 crc kubenswrapper[4753]: W0129 12:26:04.002430 4753 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3b46211c_99c7_44c9_8275_991e70edba9d.slice/crio-fcbcf922fda874c0af0c761cfc5cac92a0d0e03d6f7b1e5b69dc291a86ac9e97 WatchSource:0}: Error finding container fcbcf922fda874c0af0c761cfc5cac92a0d0e03d6f7b1e5b69dc291a86ac9e97: Status 404 returned error can't find the container with id fcbcf922fda874c0af0c761cfc5cac92a0d0e03d6f7b1e5b69dc291a86ac9e97 Jan 29 12:26:04 crc kubenswrapper[4753]: I0129 12:26:04.033934 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-696f9bf98-6q66p" event={"ID":"3b46211c-99c7-44c9-8275-991e70edba9d","Type":"ContainerStarted","Data":"fcbcf922fda874c0af0c761cfc5cac92a0d0e03d6f7b1e5b69dc291a86ac9e97"} Jan 29 12:26:07 crc kubenswrapper[4753]: I0129 12:26:07.069163 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-696f9bf98-6q66p" event={"ID":"3b46211c-99c7-44c9-8275-991e70edba9d","Type":"ContainerStarted","Data":"af77e7829ca3e4dec8874ed7b84afe03fc47c6ba2a92cf820ffeb3981b73db6a"} Jan 29 12:26:07 crc kubenswrapper[4753]: I0129 12:26:07.069735 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-696f9bf98-6q66p" Jan 29 12:26:07 crc kubenswrapper[4753]: I0129 12:26:07.097098 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-696f9bf98-6q66p" podStartSLOduration=1.789907664 podStartE2EDuration="4.097043864s" podCreationTimestamp="2026-01-29 12:26:03 +0000 UTC" firstStartedPulling="2026-01-29 12:26:04.00458289 +0000 UTC m=+1178.256664355" 
lastFinishedPulling="2026-01-29 12:26:06.3117191 +0000 UTC m=+1180.563800555" observedRunningTime="2026-01-29 12:26:07.090420731 +0000 UTC m=+1181.342502206" watchObservedRunningTime="2026-01-29 12:26:07.097043864 +0000 UTC m=+1181.349125329" Jan 29 12:26:13 crc kubenswrapper[4753]: I0129 12:26:13.452539 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-696f9bf98-6q66p" Jan 29 12:26:22 crc kubenswrapper[4753]: I0129 12:26:22.731397 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-index-g5k7d"] Jan 29 12:26:22 crc kubenswrapper[4753]: I0129 12:26:22.732870 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-index-g5k7d" Jan 29 12:26:22 crc kubenswrapper[4753]: I0129 12:26:22.745638 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-index-dockercfg-p9d7h" Jan 29 12:26:22 crc kubenswrapper[4753]: I0129 12:26:22.745975 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-index-g5k7d"] Jan 29 12:26:22 crc kubenswrapper[4753]: I0129 12:26:22.781100 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vfq8s\" (UniqueName: \"kubernetes.io/projected/01f1fcff-ebfe-4f44-9ed6-ea5308d4f2b6-kube-api-access-vfq8s\") pod \"swift-operator-index-g5k7d\" (UID: \"01f1fcff-ebfe-4f44-9ed6-ea5308d4f2b6\") " pod="openstack-operators/swift-operator-index-g5k7d" Jan 29 12:26:22 crc kubenswrapper[4753]: I0129 12:26:22.883007 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vfq8s\" (UniqueName: \"kubernetes.io/projected/01f1fcff-ebfe-4f44-9ed6-ea5308d4f2b6-kube-api-access-vfq8s\") pod \"swift-operator-index-g5k7d\" (UID: \"01f1fcff-ebfe-4f44-9ed6-ea5308d4f2b6\") " pod="openstack-operators/swift-operator-index-g5k7d" Jan 29 12:26:22 crc kubenswrapper[4753]: I0129 12:26:22.904951 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vfq8s\" (UniqueName: \"kubernetes.io/projected/01f1fcff-ebfe-4f44-9ed6-ea5308d4f2b6-kube-api-access-vfq8s\") pod \"swift-operator-index-g5k7d\" (UID: \"01f1fcff-ebfe-4f44-9ed6-ea5308d4f2b6\") " pod="openstack-operators/swift-operator-index-g5k7d" Jan 29 12:26:23 crc kubenswrapper[4753]: I0129 12:26:23.058667 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/swift-operator-index-g5k7d" Jan 29 12:26:23 crc kubenswrapper[4753]: I0129 12:26:23.672269 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-index-g5k7d"] Jan 29 12:26:24 crc kubenswrapper[4753]: I0129 12:26:24.343203 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-index-g5k7d" event={"ID":"01f1fcff-ebfe-4f44-9ed6-ea5308d4f2b6","Type":"ContainerStarted","Data":"cb6741383d82137889fe7863df9f7133eca961473d0aecfd85df91ed485c3eb7"} Jan 29 12:26:27 crc kubenswrapper[4753]: I0129 12:26:27.371358 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-index-g5k7d" event={"ID":"01f1fcff-ebfe-4f44-9ed6-ea5308d4f2b6","Type":"ContainerStarted","Data":"82e9d7a70f0b4833f80f4d86dd3202dbe66cf538ebb9baf0f943b314054e5c86"} Jan 29 12:26:27 crc kubenswrapper[4753]: I0129 12:26:27.396951 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-index-g5k7d" podStartSLOduration=2.702057256 podStartE2EDuration="5.396928641s" podCreationTimestamp="2026-01-29 12:26:22 +0000 UTC" firstStartedPulling="2026-01-29 12:26:23.684590571 +0000 UTC m=+1197.936672036" lastFinishedPulling="2026-01-29 12:26:26.379461966 +0000 UTC m=+1200.631543421" observedRunningTime="2026-01-29 12:26:27.390971836 +0000 UTC m=+1201.643053301" watchObservedRunningTime="2026-01-29 12:26:27.396928641 +0000 UTC m=+1201.649010096" Jan 29 12:26:29 crc kubenswrapper[4753]: I0129 12:26:29.252749 4753 patch_prober.go:28] interesting pod/machine-config-daemon-7c24x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 12:26:29 crc kubenswrapper[4753]: I0129 12:26:29.253117 4753 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 12:26:30 crc kubenswrapper[4753]: I0129 12:26:30.052512 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/barbican-db-create-wbnw7"] Jan 29 12:26:30 crc kubenswrapper[4753]: I0129 12:26:30.053956 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/barbican-db-create-wbnw7" Jan 29 12:26:30 crc kubenswrapper[4753]: I0129 12:26:30.060447 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/barbican-c51a-account-create-update-w5dvf"] Jan 29 12:26:30 crc kubenswrapper[4753]: I0129 12:26:30.061626 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/barbican-c51a-account-create-update-w5dvf" Jan 29 12:26:30 crc kubenswrapper[4753]: I0129 12:26:30.069208 4753 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"barbican-db-secret" Jan 29 12:26:30 crc kubenswrapper[4753]: I0129 12:26:30.075180 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/barbican-db-create-wbnw7"] Jan 29 12:26:30 crc kubenswrapper[4753]: I0129 12:26:30.079843 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/barbican-c51a-account-create-update-w5dvf"] Jan 29 12:26:30 crc kubenswrapper[4753]: I0129 12:26:30.156796 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5db8a16d-47c1-4e9c-870d-a8d6490202e8-operator-scripts\") pod \"barbican-c51a-account-create-update-w5dvf\" (UID: \"5db8a16d-47c1-4e9c-870d-a8d6490202e8\") " pod="swift-kuttl-tests/barbican-c51a-account-create-update-w5dvf" Jan 29 12:26:30 crc kubenswrapper[4753]: I0129 12:26:30.156859 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sf7ln\" (UniqueName: \"kubernetes.io/projected/d138c317-92d8-4dc4-ac16-87975335e9fb-kube-api-access-sf7ln\") pod \"barbican-db-create-wbnw7\" (UID: \"d138c317-92d8-4dc4-ac16-87975335e9fb\") " pod="swift-kuttl-tests/barbican-db-create-wbnw7" Jan 29 12:26:30 crc kubenswrapper[4753]: I0129 12:26:30.157193 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pd24w\" (UniqueName: \"kubernetes.io/projected/5db8a16d-47c1-4e9c-870d-a8d6490202e8-kube-api-access-pd24w\") pod \"barbican-c51a-account-create-update-w5dvf\" (UID: \"5db8a16d-47c1-4e9c-870d-a8d6490202e8\") " pod="swift-kuttl-tests/barbican-c51a-account-create-update-w5dvf" Jan 29 12:26:30 crc kubenswrapper[4753]: I0129 12:26:30.157242 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d138c317-92d8-4dc4-ac16-87975335e9fb-operator-scripts\") pod \"barbican-db-create-wbnw7\" (UID: \"d138c317-92d8-4dc4-ac16-87975335e9fb\") " pod="swift-kuttl-tests/barbican-db-create-wbnw7" Jan 29 12:26:30 crc kubenswrapper[4753]: I0129 12:26:30.258500 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pd24w\" (UniqueName: \"kubernetes.io/projected/5db8a16d-47c1-4e9c-870d-a8d6490202e8-kube-api-access-pd24w\") pod \"barbican-c51a-account-create-update-w5dvf\" (UID: \"5db8a16d-47c1-4e9c-870d-a8d6490202e8\") " pod="swift-kuttl-tests/barbican-c51a-account-create-update-w5dvf" Jan 29 12:26:30 crc kubenswrapper[4753]: I0129 12:26:30.258566 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d138c317-92d8-4dc4-ac16-87975335e9fb-operator-scripts\") pod \"barbican-db-create-wbnw7\" (UID: \"d138c317-92d8-4dc4-ac16-87975335e9fb\") " pod="swift-kuttl-tests/barbican-db-create-wbnw7" Jan 29 12:26:30 crc kubenswrapper[4753]: I0129 12:26:30.258631 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5db8a16d-47c1-4e9c-870d-a8d6490202e8-operator-scripts\") pod \"barbican-c51a-account-create-update-w5dvf\" (UID: \"5db8a16d-47c1-4e9c-870d-a8d6490202e8\") " 
pod="swift-kuttl-tests/barbican-c51a-account-create-update-w5dvf" Jan 29 12:26:30 crc kubenswrapper[4753]: I0129 12:26:30.258660 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sf7ln\" (UniqueName: \"kubernetes.io/projected/d138c317-92d8-4dc4-ac16-87975335e9fb-kube-api-access-sf7ln\") pod \"barbican-db-create-wbnw7\" (UID: \"d138c317-92d8-4dc4-ac16-87975335e9fb\") " pod="swift-kuttl-tests/barbican-db-create-wbnw7" Jan 29 12:26:30 crc kubenswrapper[4753]: I0129 12:26:30.259629 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5db8a16d-47c1-4e9c-870d-a8d6490202e8-operator-scripts\") pod \"barbican-c51a-account-create-update-w5dvf\" (UID: \"5db8a16d-47c1-4e9c-870d-a8d6490202e8\") " pod="swift-kuttl-tests/barbican-c51a-account-create-update-w5dvf" Jan 29 12:26:30 crc kubenswrapper[4753]: I0129 12:26:30.259818 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d138c317-92d8-4dc4-ac16-87975335e9fb-operator-scripts\") pod \"barbican-db-create-wbnw7\" (UID: \"d138c317-92d8-4dc4-ac16-87975335e9fb\") " pod="swift-kuttl-tests/barbican-db-create-wbnw7" Jan 29 12:26:30 crc kubenswrapper[4753]: I0129 12:26:30.281643 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sf7ln\" (UniqueName: \"kubernetes.io/projected/d138c317-92d8-4dc4-ac16-87975335e9fb-kube-api-access-sf7ln\") pod \"barbican-db-create-wbnw7\" (UID: \"d138c317-92d8-4dc4-ac16-87975335e9fb\") " pod="swift-kuttl-tests/barbican-db-create-wbnw7" Jan 29 12:26:30 crc kubenswrapper[4753]: I0129 12:26:30.281769 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pd24w\" (UniqueName: \"kubernetes.io/projected/5db8a16d-47c1-4e9c-870d-a8d6490202e8-kube-api-access-pd24w\") pod \"barbican-c51a-account-create-update-w5dvf\" (UID: \"5db8a16d-47c1-4e9c-870d-a8d6490202e8\") " pod="swift-kuttl-tests/barbican-c51a-account-create-update-w5dvf" Jan 29 12:26:30 crc kubenswrapper[4753]: I0129 12:26:30.373788 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/barbican-db-create-wbnw7" Jan 29 12:26:30 crc kubenswrapper[4753]: I0129 12:26:30.384005 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/barbican-c51a-account-create-update-w5dvf" Jan 29 12:26:30 crc kubenswrapper[4753]: I0129 12:26:30.710313 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/barbican-db-create-wbnw7"] Jan 29 12:26:30 crc kubenswrapper[4753]: I0129 12:26:30.745363 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/barbican-c51a-account-create-update-w5dvf"] Jan 29 12:26:30 crc kubenswrapper[4753]: W0129 12:26:30.751810 4753 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5db8a16d_47c1_4e9c_870d_a8d6490202e8.slice/crio-c36423987d5f3e354fa6440c1177d54d6939a372062425ffadc2135ecc38f478 WatchSource:0}: Error finding container c36423987d5f3e354fa6440c1177d54d6939a372062425ffadc2135ecc38f478: Status 404 returned error can't find the container with id c36423987d5f3e354fa6440c1177d54d6939a372062425ffadc2135ecc38f478 Jan 29 12:26:31 crc kubenswrapper[4753]: I0129 12:26:31.398856 4753 generic.go:334] "Generic (PLEG): container finished" podID="d138c317-92d8-4dc4-ac16-87975335e9fb" containerID="84bd8e96b7ca4bb2150e0c9bf51a214d128ba743a9a76d644f83028757efb8cf" exitCode=0 Jan 29 12:26:31 crc kubenswrapper[4753]: I0129 12:26:31.398922 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/barbican-db-create-wbnw7" event={"ID":"d138c317-92d8-4dc4-ac16-87975335e9fb","Type":"ContainerDied","Data":"84bd8e96b7ca4bb2150e0c9bf51a214d128ba743a9a76d644f83028757efb8cf"} Jan 29 12:26:31 crc kubenswrapper[4753]: I0129 12:26:31.399289 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/barbican-db-create-wbnw7" event={"ID":"d138c317-92d8-4dc4-ac16-87975335e9fb","Type":"ContainerStarted","Data":"cd0195311ccc0ad6c940a41ecc55737b56a26f8e48c5f63065fd3f3b6ef7cbdc"} Jan 29 12:26:31 crc kubenswrapper[4753]: I0129 12:26:31.400886 4753 generic.go:334] "Generic (PLEG): container finished" podID="5db8a16d-47c1-4e9c-870d-a8d6490202e8" containerID="b85db201059a7d27aa1f4d5e8e377ef0658f80ca8fc1e9e04678ab46f32f8429" exitCode=0 Jan 29 12:26:31 crc kubenswrapper[4753]: I0129 12:26:31.400938 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/barbican-c51a-account-create-update-w5dvf" event={"ID":"5db8a16d-47c1-4e9c-870d-a8d6490202e8","Type":"ContainerDied","Data":"b85db201059a7d27aa1f4d5e8e377ef0658f80ca8fc1e9e04678ab46f32f8429"} Jan 29 12:26:31 crc kubenswrapper[4753]: I0129 12:26:31.400975 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/barbican-c51a-account-create-update-w5dvf" event={"ID":"5db8a16d-47c1-4e9c-870d-a8d6490202e8","Type":"ContainerStarted","Data":"c36423987d5f3e354fa6440c1177d54d6939a372062425ffadc2135ecc38f478"} Jan 29 12:26:32 crc kubenswrapper[4753]: I0129 12:26:32.213410 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="swift-kuttl-tests/keystone-66595c964d-6hnff" Jan 29 12:26:32 crc kubenswrapper[4753]: I0129 12:26:32.796098 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/barbican-c51a-account-create-update-w5dvf" Jan 29 12:26:32 crc kubenswrapper[4753]: I0129 12:26:32.803074 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/barbican-db-create-wbnw7" Jan 29 12:26:32 crc kubenswrapper[4753]: I0129 12:26:32.940196 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pd24w\" (UniqueName: \"kubernetes.io/projected/5db8a16d-47c1-4e9c-870d-a8d6490202e8-kube-api-access-pd24w\") pod \"5db8a16d-47c1-4e9c-870d-a8d6490202e8\" (UID: \"5db8a16d-47c1-4e9c-870d-a8d6490202e8\") " Jan 29 12:26:32 crc kubenswrapper[4753]: I0129 12:26:32.940308 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d138c317-92d8-4dc4-ac16-87975335e9fb-operator-scripts\") pod \"d138c317-92d8-4dc4-ac16-87975335e9fb\" (UID: \"d138c317-92d8-4dc4-ac16-87975335e9fb\") " Jan 29 12:26:32 crc kubenswrapper[4753]: I0129 12:26:32.940397 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sf7ln\" (UniqueName: \"kubernetes.io/projected/d138c317-92d8-4dc4-ac16-87975335e9fb-kube-api-access-sf7ln\") pod \"d138c317-92d8-4dc4-ac16-87975335e9fb\" (UID: \"d138c317-92d8-4dc4-ac16-87975335e9fb\") " Jan 29 12:26:32 crc kubenswrapper[4753]: I0129 12:26:32.940434 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5db8a16d-47c1-4e9c-870d-a8d6490202e8-operator-scripts\") pod \"5db8a16d-47c1-4e9c-870d-a8d6490202e8\" (UID: \"5db8a16d-47c1-4e9c-870d-a8d6490202e8\") " Jan 29 12:26:32 crc kubenswrapper[4753]: I0129 12:26:32.941154 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5db8a16d-47c1-4e9c-870d-a8d6490202e8-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "5db8a16d-47c1-4e9c-870d-a8d6490202e8" (UID: "5db8a16d-47c1-4e9c-870d-a8d6490202e8"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:26:32 crc kubenswrapper[4753]: I0129 12:26:32.941413 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d138c317-92d8-4dc4-ac16-87975335e9fb-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d138c317-92d8-4dc4-ac16-87975335e9fb" (UID: "d138c317-92d8-4dc4-ac16-87975335e9fb"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:26:32 crc kubenswrapper[4753]: I0129 12:26:32.945818 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5db8a16d-47c1-4e9c-870d-a8d6490202e8-kube-api-access-pd24w" (OuterVolumeSpecName: "kube-api-access-pd24w") pod "5db8a16d-47c1-4e9c-870d-a8d6490202e8" (UID: "5db8a16d-47c1-4e9c-870d-a8d6490202e8"). InnerVolumeSpecName "kube-api-access-pd24w". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:26:32 crc kubenswrapper[4753]: I0129 12:26:32.945999 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d138c317-92d8-4dc4-ac16-87975335e9fb-kube-api-access-sf7ln" (OuterVolumeSpecName: "kube-api-access-sf7ln") pod "d138c317-92d8-4dc4-ac16-87975335e9fb" (UID: "d138c317-92d8-4dc4-ac16-87975335e9fb"). InnerVolumeSpecName "kube-api-access-sf7ln". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:26:33 crc kubenswrapper[4753]: I0129 12:26:33.041388 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pd24w\" (UniqueName: \"kubernetes.io/projected/5db8a16d-47c1-4e9c-870d-a8d6490202e8-kube-api-access-pd24w\") on node \"crc\" DevicePath \"\"" Jan 29 12:26:33 crc kubenswrapper[4753]: I0129 12:26:33.041425 4753 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d138c317-92d8-4dc4-ac16-87975335e9fb-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:26:33 crc kubenswrapper[4753]: I0129 12:26:33.041436 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sf7ln\" (UniqueName: \"kubernetes.io/projected/d138c317-92d8-4dc4-ac16-87975335e9fb-kube-api-access-sf7ln\") on node \"crc\" DevicePath \"\"" Jan 29 12:26:33 crc kubenswrapper[4753]: I0129 12:26:33.041445 4753 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5db8a16d-47c1-4e9c-870d-a8d6490202e8-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:26:33 crc kubenswrapper[4753]: I0129 12:26:33.059062 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-index-g5k7d" Jan 29 12:26:33 crc kubenswrapper[4753]: I0129 12:26:33.060573 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/swift-operator-index-g5k7d" Jan 29 12:26:33 crc kubenswrapper[4753]: I0129 12:26:33.216412 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/swift-operator-index-g5k7d" Jan 29 12:26:33 crc kubenswrapper[4753]: I0129 12:26:33.428871 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/barbican-db-create-wbnw7" Jan 29 12:26:33 crc kubenswrapper[4753]: I0129 12:26:33.428925 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/barbican-db-create-wbnw7" event={"ID":"d138c317-92d8-4dc4-ac16-87975335e9fb","Type":"ContainerDied","Data":"cd0195311ccc0ad6c940a41ecc55737b56a26f8e48c5f63065fd3f3b6ef7cbdc"} Jan 29 12:26:33 crc kubenswrapper[4753]: I0129 12:26:33.428991 4753 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cd0195311ccc0ad6c940a41ecc55737b56a26f8e48c5f63065fd3f3b6ef7cbdc" Jan 29 12:26:33 crc kubenswrapper[4753]: I0129 12:26:33.431296 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/barbican-c51a-account-create-update-w5dvf" Jan 29 12:26:33 crc kubenswrapper[4753]: I0129 12:26:33.434273 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/barbican-c51a-account-create-update-w5dvf" event={"ID":"5db8a16d-47c1-4e9c-870d-a8d6490202e8","Type":"ContainerDied","Data":"c36423987d5f3e354fa6440c1177d54d6939a372062425ffadc2135ecc38f478"} Jan 29 12:26:33 crc kubenswrapper[4753]: I0129 12:26:33.434331 4753 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c36423987d5f3e354fa6440c1177d54d6939a372062425ffadc2135ecc38f478" Jan 29 12:26:33 crc kubenswrapper[4753]: I0129 12:26:33.483638 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-index-g5k7d" Jan 29 12:26:35 crc kubenswrapper[4753]: I0129 12:26:35.317693 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/barbican-db-sync-gq7gm"] Jan 29 12:26:35 crc kubenswrapper[4753]: E0129 12:26:35.319324 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5db8a16d-47c1-4e9c-870d-a8d6490202e8" containerName="mariadb-account-create-update" Jan 29 12:26:35 crc kubenswrapper[4753]: I0129 12:26:35.319502 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="5db8a16d-47c1-4e9c-870d-a8d6490202e8" containerName="mariadb-account-create-update" Jan 29 12:26:35 crc kubenswrapper[4753]: E0129 12:26:35.319648 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d138c317-92d8-4dc4-ac16-87975335e9fb" containerName="mariadb-database-create" Jan 29 12:26:35 crc kubenswrapper[4753]: I0129 12:26:35.319734 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="d138c317-92d8-4dc4-ac16-87975335e9fb" containerName="mariadb-database-create" Jan 29 12:26:35 crc kubenswrapper[4753]: I0129 12:26:35.320068 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="5db8a16d-47c1-4e9c-870d-a8d6490202e8" containerName="mariadb-account-create-update" Jan 29 12:26:35 crc kubenswrapper[4753]: I0129 12:26:35.320204 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="d138c317-92d8-4dc4-ac16-87975335e9fb" containerName="mariadb-database-create" Jan 29 12:26:35 crc kubenswrapper[4753]: I0129 12:26:35.321055 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/barbican-db-sync-gq7gm" Jan 29 12:26:35 crc kubenswrapper[4753]: I0129 12:26:35.486666 4753 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"barbican-config-data" Jan 29 12:26:35 crc kubenswrapper[4753]: I0129 12:26:35.488310 4753 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"barbican-barbican-dockercfg-n479x" Jan 29 12:26:35 crc kubenswrapper[4753]: I0129 12:26:35.502080 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/barbican-db-sync-gq7gm"] Jan 29 12:26:35 crc kubenswrapper[4753]: I0129 12:26:35.581838 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kwhs7\" (UniqueName: \"kubernetes.io/projected/e51f25a1-62d9-4b9b-8705-82187d0bd515-kube-api-access-kwhs7\") pod \"barbican-db-sync-gq7gm\" (UID: \"e51f25a1-62d9-4b9b-8705-82187d0bd515\") " pod="swift-kuttl-tests/barbican-db-sync-gq7gm" Jan 29 12:26:35 crc kubenswrapper[4753]: I0129 12:26:35.582370 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e51f25a1-62d9-4b9b-8705-82187d0bd515-db-sync-config-data\") pod \"barbican-db-sync-gq7gm\" (UID: \"e51f25a1-62d9-4b9b-8705-82187d0bd515\") " pod="swift-kuttl-tests/barbican-db-sync-gq7gm" Jan 29 12:26:35 crc kubenswrapper[4753]: I0129 12:26:35.683577 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e51f25a1-62d9-4b9b-8705-82187d0bd515-db-sync-config-data\") pod \"barbican-db-sync-gq7gm\" (UID: \"e51f25a1-62d9-4b9b-8705-82187d0bd515\") " pod="swift-kuttl-tests/barbican-db-sync-gq7gm" Jan 29 12:26:35 crc kubenswrapper[4753]: I0129 12:26:35.683661 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kwhs7\" (UniqueName: \"kubernetes.io/projected/e51f25a1-62d9-4b9b-8705-82187d0bd515-kube-api-access-kwhs7\") pod \"barbican-db-sync-gq7gm\" (UID: \"e51f25a1-62d9-4b9b-8705-82187d0bd515\") " pod="swift-kuttl-tests/barbican-db-sync-gq7gm" Jan 29 12:26:35 crc kubenswrapper[4753]: I0129 12:26:35.689912 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e51f25a1-62d9-4b9b-8705-82187d0bd515-db-sync-config-data\") pod \"barbican-db-sync-gq7gm\" (UID: \"e51f25a1-62d9-4b9b-8705-82187d0bd515\") " pod="swift-kuttl-tests/barbican-db-sync-gq7gm" Jan 29 12:26:35 crc kubenswrapper[4753]: I0129 12:26:35.706395 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kwhs7\" (UniqueName: \"kubernetes.io/projected/e51f25a1-62d9-4b9b-8705-82187d0bd515-kube-api-access-kwhs7\") pod \"barbican-db-sync-gq7gm\" (UID: \"e51f25a1-62d9-4b9b-8705-82187d0bd515\") " pod="swift-kuttl-tests/barbican-db-sync-gq7gm" Jan 29 12:26:35 crc kubenswrapper[4753]: I0129 12:26:35.805432 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/barbican-db-sync-gq7gm" Jan 29 12:26:36 crc kubenswrapper[4753]: I0129 12:26:36.515606 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/barbican-db-sync-gq7gm"] Jan 29 12:26:37 crc kubenswrapper[4753]: I0129 12:26:37.503004 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/barbican-db-sync-gq7gm" event={"ID":"e51f25a1-62d9-4b9b-8705-82187d0bd515","Type":"ContainerStarted","Data":"5eda9a2efc998b7fc056aa99dd1fe641bcf89142b25c073d268f92fce3f6a3ba"} Jan 29 12:26:37 crc kubenswrapper[4753]: I0129 12:26:37.534172 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/7c9c431936913d3e6ade54f671817cf9e499411b73d2ff35ef387edf83d52zw"] Jan 29 12:26:37 crc kubenswrapper[4753]: I0129 12:26:37.536818 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/7c9c431936913d3e6ade54f671817cf9e499411b73d2ff35ef387edf83d52zw" Jan 29 12:26:37 crc kubenswrapper[4753]: I0129 12:26:37.539587 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-j2jmz" Jan 29 12:26:37 crc kubenswrapper[4753]: I0129 12:26:37.550268 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/7c9c431936913d3e6ade54f671817cf9e499411b73d2ff35ef387edf83d52zw"] Jan 29 12:26:37 crc kubenswrapper[4753]: I0129 12:26:37.749872 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/952f0548-3b0e-497c-98f1-b34e76762459-util\") pod \"7c9c431936913d3e6ade54f671817cf9e499411b73d2ff35ef387edf83d52zw\" (UID: \"952f0548-3b0e-497c-98f1-b34e76762459\") " pod="openstack-operators/7c9c431936913d3e6ade54f671817cf9e499411b73d2ff35ef387edf83d52zw" Jan 29 12:26:37 crc kubenswrapper[4753]: I0129 12:26:37.749952 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p9mqz\" (UniqueName: \"kubernetes.io/projected/952f0548-3b0e-497c-98f1-b34e76762459-kube-api-access-p9mqz\") pod \"7c9c431936913d3e6ade54f671817cf9e499411b73d2ff35ef387edf83d52zw\" (UID: \"952f0548-3b0e-497c-98f1-b34e76762459\") " pod="openstack-operators/7c9c431936913d3e6ade54f671817cf9e499411b73d2ff35ef387edf83d52zw" Jan 29 12:26:37 crc kubenswrapper[4753]: I0129 12:26:37.749996 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/952f0548-3b0e-497c-98f1-b34e76762459-bundle\") pod \"7c9c431936913d3e6ade54f671817cf9e499411b73d2ff35ef387edf83d52zw\" (UID: \"952f0548-3b0e-497c-98f1-b34e76762459\") " pod="openstack-operators/7c9c431936913d3e6ade54f671817cf9e499411b73d2ff35ef387edf83d52zw" Jan 29 12:26:37 crc kubenswrapper[4753]: I0129 12:26:37.851999 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/952f0548-3b0e-497c-98f1-b34e76762459-util\") pod \"7c9c431936913d3e6ade54f671817cf9e499411b73d2ff35ef387edf83d52zw\" (UID: \"952f0548-3b0e-497c-98f1-b34e76762459\") " pod="openstack-operators/7c9c431936913d3e6ade54f671817cf9e499411b73d2ff35ef387edf83d52zw" Jan 29 12:26:37 crc kubenswrapper[4753]: I0129 12:26:37.851376 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/952f0548-3b0e-497c-98f1-b34e76762459-util\") pod 
\"7c9c431936913d3e6ade54f671817cf9e499411b73d2ff35ef387edf83d52zw\" (UID: \"952f0548-3b0e-497c-98f1-b34e76762459\") " pod="openstack-operators/7c9c431936913d3e6ade54f671817cf9e499411b73d2ff35ef387edf83d52zw" Jan 29 12:26:37 crc kubenswrapper[4753]: I0129 12:26:37.852632 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p9mqz\" (UniqueName: \"kubernetes.io/projected/952f0548-3b0e-497c-98f1-b34e76762459-kube-api-access-p9mqz\") pod \"7c9c431936913d3e6ade54f671817cf9e499411b73d2ff35ef387edf83d52zw\" (UID: \"952f0548-3b0e-497c-98f1-b34e76762459\") " pod="openstack-operators/7c9c431936913d3e6ade54f671817cf9e499411b73d2ff35ef387edf83d52zw" Jan 29 12:26:37 crc kubenswrapper[4753]: I0129 12:26:37.853019 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/952f0548-3b0e-497c-98f1-b34e76762459-bundle\") pod \"7c9c431936913d3e6ade54f671817cf9e499411b73d2ff35ef387edf83d52zw\" (UID: \"952f0548-3b0e-497c-98f1-b34e76762459\") " pod="openstack-operators/7c9c431936913d3e6ade54f671817cf9e499411b73d2ff35ef387edf83d52zw" Jan 29 12:26:37 crc kubenswrapper[4753]: I0129 12:26:37.853462 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/952f0548-3b0e-497c-98f1-b34e76762459-bundle\") pod \"7c9c431936913d3e6ade54f671817cf9e499411b73d2ff35ef387edf83d52zw\" (UID: \"952f0548-3b0e-497c-98f1-b34e76762459\") " pod="openstack-operators/7c9c431936913d3e6ade54f671817cf9e499411b73d2ff35ef387edf83d52zw" Jan 29 12:26:37 crc kubenswrapper[4753]: I0129 12:26:37.876198 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p9mqz\" (UniqueName: \"kubernetes.io/projected/952f0548-3b0e-497c-98f1-b34e76762459-kube-api-access-p9mqz\") pod \"7c9c431936913d3e6ade54f671817cf9e499411b73d2ff35ef387edf83d52zw\" (UID: \"952f0548-3b0e-497c-98f1-b34e76762459\") " pod="openstack-operators/7c9c431936913d3e6ade54f671817cf9e499411b73d2ff35ef387edf83d52zw" Jan 29 12:26:38 crc kubenswrapper[4753]: I0129 12:26:38.155310 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/7c9c431936913d3e6ade54f671817cf9e499411b73d2ff35ef387edf83d52zw" Jan 29 12:26:38 crc kubenswrapper[4753]: I0129 12:26:38.628767 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/7c9c431936913d3e6ade54f671817cf9e499411b73d2ff35ef387edf83d52zw"] Jan 29 12:26:39 crc kubenswrapper[4753]: I0129 12:26:39.519620 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/7c9c431936913d3e6ade54f671817cf9e499411b73d2ff35ef387edf83d52zw" event={"ID":"952f0548-3b0e-497c-98f1-b34e76762459","Type":"ContainerStarted","Data":"3505b43c9dc5670652003dbefee8db3985f456ddfae8884fb55461c8a8377f0c"} Jan 29 12:26:39 crc kubenswrapper[4753]: I0129 12:26:39.520001 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/7c9c431936913d3e6ade54f671817cf9e499411b73d2ff35ef387edf83d52zw" event={"ID":"952f0548-3b0e-497c-98f1-b34e76762459","Type":"ContainerStarted","Data":"fd5814bc375ba5297a9da51ad484b61a5a8258685526e2181cead21d74182ab1"} Jan 29 12:26:41 crc kubenswrapper[4753]: I0129 12:26:41.699829 4753 generic.go:334] "Generic (PLEG): container finished" podID="952f0548-3b0e-497c-98f1-b34e76762459" containerID="3505b43c9dc5670652003dbefee8db3985f456ddfae8884fb55461c8a8377f0c" exitCode=0 Jan 29 12:26:41 crc kubenswrapper[4753]: I0129 12:26:41.699990 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/7c9c431936913d3e6ade54f671817cf9e499411b73d2ff35ef387edf83d52zw" event={"ID":"952f0548-3b0e-497c-98f1-b34e76762459","Type":"ContainerDied","Data":"3505b43c9dc5670652003dbefee8db3985f456ddfae8884fb55461c8a8377f0c"} Jan 29 12:26:50 crc kubenswrapper[4753]: I0129 12:26:50.860961 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/barbican-db-sync-gq7gm" event={"ID":"e51f25a1-62d9-4b9b-8705-82187d0bd515","Type":"ContainerStarted","Data":"23e39d78164420907cfe0f72f6db0fb34257618d5975d40e0199b96b4f1c5919"} Jan 29 12:26:50 crc kubenswrapper[4753]: I0129 12:26:50.881484 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="swift-kuttl-tests/barbican-db-sync-gq7gm" podStartSLOduration=2.311644834 podStartE2EDuration="15.881459909s" podCreationTimestamp="2026-01-29 12:26:35 +0000 UTC" firstStartedPulling="2026-01-29 12:26:36.529336373 +0000 UTC m=+1210.781417838" lastFinishedPulling="2026-01-29 12:26:50.099151468 +0000 UTC m=+1224.351232913" observedRunningTime="2026-01-29 12:26:50.876502582 +0000 UTC m=+1225.128584047" watchObservedRunningTime="2026-01-29 12:26:50.881459909 +0000 UTC m=+1225.133541364" Jan 29 12:26:52 crc kubenswrapper[4753]: I0129 12:26:52.879990 4753 generic.go:334] "Generic (PLEG): container finished" podID="952f0548-3b0e-497c-98f1-b34e76762459" containerID="a5287cd9f7b12d8a9969f6797a61d02c1feca9578af605310e5f5bb5209cc514" exitCode=0 Jan 29 12:26:52 crc kubenswrapper[4753]: I0129 12:26:52.880109 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/7c9c431936913d3e6ade54f671817cf9e499411b73d2ff35ef387edf83d52zw" event={"ID":"952f0548-3b0e-497c-98f1-b34e76762459","Type":"ContainerDied","Data":"a5287cd9f7b12d8a9969f6797a61d02c1feca9578af605310e5f5bb5209cc514"} Jan 29 12:26:53 crc kubenswrapper[4753]: I0129 12:26:53.892718 4753 generic.go:334] "Generic (PLEG): container finished" podID="952f0548-3b0e-497c-98f1-b34e76762459" containerID="216e929f3512a42b1815165bd3576440eeb076917ddbdfaabe86afb2bde69ed1" exitCode=0 Jan 29 12:26:53 crc kubenswrapper[4753]: I0129 12:26:53.897941 4753 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/7c9c431936913d3e6ade54f671817cf9e499411b73d2ff35ef387edf83d52zw" event={"ID":"952f0548-3b0e-497c-98f1-b34e76762459","Type":"ContainerDied","Data":"216e929f3512a42b1815165bd3576440eeb076917ddbdfaabe86afb2bde69ed1"} Jan 29 12:26:55 crc kubenswrapper[4753]: I0129 12:26:55.182988 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/7c9c431936913d3e6ade54f671817cf9e499411b73d2ff35ef387edf83d52zw" Jan 29 12:26:55 crc kubenswrapper[4753]: I0129 12:26:55.323807 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/952f0548-3b0e-497c-98f1-b34e76762459-bundle\") pod \"952f0548-3b0e-497c-98f1-b34e76762459\" (UID: \"952f0548-3b0e-497c-98f1-b34e76762459\") " Jan 29 12:26:55 crc kubenswrapper[4753]: I0129 12:26:55.323883 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/952f0548-3b0e-497c-98f1-b34e76762459-util\") pod \"952f0548-3b0e-497c-98f1-b34e76762459\" (UID: \"952f0548-3b0e-497c-98f1-b34e76762459\") " Jan 29 12:26:55 crc kubenswrapper[4753]: I0129 12:26:55.323955 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p9mqz\" (UniqueName: \"kubernetes.io/projected/952f0548-3b0e-497c-98f1-b34e76762459-kube-api-access-p9mqz\") pod \"952f0548-3b0e-497c-98f1-b34e76762459\" (UID: \"952f0548-3b0e-497c-98f1-b34e76762459\") " Jan 29 12:26:55 crc kubenswrapper[4753]: I0129 12:26:55.324801 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/952f0548-3b0e-497c-98f1-b34e76762459-bundle" (OuterVolumeSpecName: "bundle") pod "952f0548-3b0e-497c-98f1-b34e76762459" (UID: "952f0548-3b0e-497c-98f1-b34e76762459"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:26:55 crc kubenswrapper[4753]: I0129 12:26:55.331145 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/952f0548-3b0e-497c-98f1-b34e76762459-kube-api-access-p9mqz" (OuterVolumeSpecName: "kube-api-access-p9mqz") pod "952f0548-3b0e-497c-98f1-b34e76762459" (UID: "952f0548-3b0e-497c-98f1-b34e76762459"). InnerVolumeSpecName "kube-api-access-p9mqz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:26:55 crc kubenswrapper[4753]: I0129 12:26:55.335785 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/952f0548-3b0e-497c-98f1-b34e76762459-util" (OuterVolumeSpecName: "util") pod "952f0548-3b0e-497c-98f1-b34e76762459" (UID: "952f0548-3b0e-497c-98f1-b34e76762459"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:26:55 crc kubenswrapper[4753]: I0129 12:26:55.425865 4753 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/952f0548-3b0e-497c-98f1-b34e76762459-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:26:55 crc kubenswrapper[4753]: I0129 12:26:55.426354 4753 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/952f0548-3b0e-497c-98f1-b34e76762459-util\") on node \"crc\" DevicePath \"\"" Jan 29 12:26:55 crc kubenswrapper[4753]: I0129 12:26:55.426365 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p9mqz\" (UniqueName: \"kubernetes.io/projected/952f0548-3b0e-497c-98f1-b34e76762459-kube-api-access-p9mqz\") on node \"crc\" DevicePath \"\"" Jan 29 12:26:55 crc kubenswrapper[4753]: I0129 12:26:55.915757 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/7c9c431936913d3e6ade54f671817cf9e499411b73d2ff35ef387edf83d52zw" event={"ID":"952f0548-3b0e-497c-98f1-b34e76762459","Type":"ContainerDied","Data":"fd5814bc375ba5297a9da51ad484b61a5a8258685526e2181cead21d74182ab1"} Jan 29 12:26:55 crc kubenswrapper[4753]: I0129 12:26:55.915807 4753 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fd5814bc375ba5297a9da51ad484b61a5a8258685526e2181cead21d74182ab1" Jan 29 12:26:55 crc kubenswrapper[4753]: I0129 12:26:55.915900 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/7c9c431936913d3e6ade54f671817cf9e499411b73d2ff35ef387edf83d52zw" Jan 29 12:26:59 crc kubenswrapper[4753]: I0129 12:26:59.252862 4753 patch_prober.go:28] interesting pod/machine-config-daemon-7c24x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 12:26:59 crc kubenswrapper[4753]: I0129 12:26:59.253316 4753 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 12:26:59 crc kubenswrapper[4753]: I0129 12:26:59.253386 4753 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" Jan 29 12:26:59 crc kubenswrapper[4753]: I0129 12:26:59.254159 4753 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"5697d146fc311e04cb43bd311c6234230b3e6c0088cc915fea2e2ab2972df9e8"} pod="openshift-machine-config-operator/machine-config-daemon-7c24x" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 12:26:59 crc kubenswrapper[4753]: I0129 12:26:59.254274 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" containerName="machine-config-daemon" containerID="cri-o://5697d146fc311e04cb43bd311c6234230b3e6c0088cc915fea2e2ab2972df9e8" gracePeriod=600 Jan 29 12:27:00 crc kubenswrapper[4753]: I0129 12:27:00.016964 4753 generic.go:334] "Generic (PLEG): container finished" 
podID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" containerID="5697d146fc311e04cb43bd311c6234230b3e6c0088cc915fea2e2ab2972df9e8" exitCode=0 Jan 29 12:27:00 crc kubenswrapper[4753]: I0129 12:27:00.017614 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" event={"ID":"b0310995-a7c7-47c3-ae6c-05daaaba92a6","Type":"ContainerDied","Data":"5697d146fc311e04cb43bd311c6234230b3e6c0088cc915fea2e2ab2972df9e8"} Jan 29 12:27:00 crc kubenswrapper[4753]: I0129 12:27:00.017679 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" event={"ID":"b0310995-a7c7-47c3-ae6c-05daaaba92a6","Type":"ContainerStarted","Data":"46e4d0e61ffa31047d03b8be433c21bd5af5bab8ccbf094d8375450be774834e"} Jan 29 12:27:00 crc kubenswrapper[4753]: I0129 12:27:00.017747 4753 scope.go:117] "RemoveContainer" containerID="a062eb50ea7e6f751f3ad76c7903eb2507edfb5f26af3ebaf7ccc8d61960baaf" Jan 29 12:27:02 crc kubenswrapper[4753]: I0129 12:27:02.042743 4753 generic.go:334] "Generic (PLEG): container finished" podID="e51f25a1-62d9-4b9b-8705-82187d0bd515" containerID="23e39d78164420907cfe0f72f6db0fb34257618d5975d40e0199b96b4f1c5919" exitCode=0 Jan 29 12:27:02 crc kubenswrapper[4753]: I0129 12:27:02.043287 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/barbican-db-sync-gq7gm" event={"ID":"e51f25a1-62d9-4b9b-8705-82187d0bd515","Type":"ContainerDied","Data":"23e39d78164420907cfe0f72f6db0fb34257618d5975d40e0199b96b4f1c5919"} Jan 29 12:27:03 crc kubenswrapper[4753]: I0129 12:27:03.325523 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/barbican-db-sync-gq7gm" Jan 29 12:27:03 crc kubenswrapper[4753]: I0129 12:27:03.395785 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e51f25a1-62d9-4b9b-8705-82187d0bd515-db-sync-config-data\") pod \"e51f25a1-62d9-4b9b-8705-82187d0bd515\" (UID: \"e51f25a1-62d9-4b9b-8705-82187d0bd515\") " Jan 29 12:27:03 crc kubenswrapper[4753]: I0129 12:27:03.395908 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kwhs7\" (UniqueName: \"kubernetes.io/projected/e51f25a1-62d9-4b9b-8705-82187d0bd515-kube-api-access-kwhs7\") pod \"e51f25a1-62d9-4b9b-8705-82187d0bd515\" (UID: \"e51f25a1-62d9-4b9b-8705-82187d0bd515\") " Jan 29 12:27:03 crc kubenswrapper[4753]: I0129 12:27:03.404495 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e51f25a1-62d9-4b9b-8705-82187d0bd515-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "e51f25a1-62d9-4b9b-8705-82187d0bd515" (UID: "e51f25a1-62d9-4b9b-8705-82187d0bd515"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:27:03 crc kubenswrapper[4753]: I0129 12:27:03.404761 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e51f25a1-62d9-4b9b-8705-82187d0bd515-kube-api-access-kwhs7" (OuterVolumeSpecName: "kube-api-access-kwhs7") pod "e51f25a1-62d9-4b9b-8705-82187d0bd515" (UID: "e51f25a1-62d9-4b9b-8705-82187d0bd515"). InnerVolumeSpecName "kube-api-access-kwhs7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:27:03 crc kubenswrapper[4753]: I0129 12:27:03.497095 4753 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e51f25a1-62d9-4b9b-8705-82187d0bd515-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 12:27:03 crc kubenswrapper[4753]: I0129 12:27:03.497127 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kwhs7\" (UniqueName: \"kubernetes.io/projected/e51f25a1-62d9-4b9b-8705-82187d0bd515-kube-api-access-kwhs7\") on node \"crc\" DevicePath \"\"" Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.063317 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/barbican-db-sync-gq7gm" event={"ID":"e51f25a1-62d9-4b9b-8705-82187d0bd515","Type":"ContainerDied","Data":"5eda9a2efc998b7fc056aa99dd1fe641bcf89142b25c073d268f92fce3f6a3ba"} Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.063689 4753 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5eda9a2efc998b7fc056aa99dd1fe641bcf89142b25c073d268f92fce3f6a3ba" Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.063731 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/barbican-db-sync-gq7gm" Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.417110 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/barbican-worker-d6d6cc49c-6q5wj"] Jan 29 12:27:04 crc kubenswrapper[4753]: E0129 12:27:04.417388 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="952f0548-3b0e-497c-98f1-b34e76762459" containerName="extract" Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.417416 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="952f0548-3b0e-497c-98f1-b34e76762459" containerName="extract" Jan 29 12:27:04 crc kubenswrapper[4753]: E0129 12:27:04.417438 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="952f0548-3b0e-497c-98f1-b34e76762459" containerName="util" Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.417444 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="952f0548-3b0e-497c-98f1-b34e76762459" containerName="util" Jan 29 12:27:04 crc kubenswrapper[4753]: E0129 12:27:04.417452 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="952f0548-3b0e-497c-98f1-b34e76762459" containerName="pull" Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.417459 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="952f0548-3b0e-497c-98f1-b34e76762459" containerName="pull" Jan 29 12:27:04 crc kubenswrapper[4753]: E0129 12:27:04.417473 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e51f25a1-62d9-4b9b-8705-82187d0bd515" containerName="barbican-db-sync" Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.417479 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="e51f25a1-62d9-4b9b-8705-82187d0bd515" containerName="barbican-db-sync" Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.417605 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="e51f25a1-62d9-4b9b-8705-82187d0bd515" containerName="barbican-db-sync" Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.417621 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="952f0548-3b0e-497c-98f1-b34e76762459" containerName="extract" Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.418507 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/barbican-worker-d6d6cc49c-6q5wj" Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.429654 4753 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"barbican-worker-config-data" Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.430335 4753 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"barbican-config-data" Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.430417 4753 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"barbican-barbican-dockercfg-n479x" Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.442829 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/barbican-worker-d6d6cc49c-6q5wj"] Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.502058 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/barbican-keystone-listener-55fbcf8568-mtgrc"] Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.503473 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/barbican-keystone-listener-55fbcf8568-mtgrc" Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.506525 4753 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"barbican-keystone-listener-config-data" Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.536537 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/barbican-keystone-listener-55fbcf8568-mtgrc"] Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.617674 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8dvtp\" (UniqueName: \"kubernetes.io/projected/49d7637b-85ec-47c7-bf42-a55aa5a8b8dd-kube-api-access-8dvtp\") pod \"barbican-worker-d6d6cc49c-6q5wj\" (UID: \"49d7637b-85ec-47c7-bf42-a55aa5a8b8dd\") " pod="swift-kuttl-tests/barbican-worker-d6d6cc49c-6q5wj" Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.617752 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852-config-data-custom\") pod \"barbican-keystone-listener-55fbcf8568-mtgrc\" (UID: \"3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852\") " pod="swift-kuttl-tests/barbican-keystone-listener-55fbcf8568-mtgrc" Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.617894 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9l6b4\" (UniqueName: \"kubernetes.io/projected/3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852-kube-api-access-9l6b4\") pod \"barbican-keystone-listener-55fbcf8568-mtgrc\" (UID: \"3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852\") " pod="swift-kuttl-tests/barbican-keystone-listener-55fbcf8568-mtgrc" Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.618017 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852-logs\") pod \"barbican-keystone-listener-55fbcf8568-mtgrc\" (UID: \"3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852\") " pod="swift-kuttl-tests/barbican-keystone-listener-55fbcf8568-mtgrc" Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.618079 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/49d7637b-85ec-47c7-bf42-a55aa5a8b8dd-logs\") pod \"barbican-worker-d6d6cc49c-6q5wj\" (UID: \"49d7637b-85ec-47c7-bf42-a55aa5a8b8dd\") " pod="swift-kuttl-tests/barbican-worker-d6d6cc49c-6q5wj" Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.618162 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/49d7637b-85ec-47c7-bf42-a55aa5a8b8dd-config-data-custom\") pod \"barbican-worker-d6d6cc49c-6q5wj\" (UID: \"49d7637b-85ec-47c7-bf42-a55aa5a8b8dd\") " pod="swift-kuttl-tests/barbican-worker-d6d6cc49c-6q5wj" Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.618348 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852-config-data\") pod \"barbican-keystone-listener-55fbcf8568-mtgrc\" (UID: \"3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852\") " pod="swift-kuttl-tests/barbican-keystone-listener-55fbcf8568-mtgrc" Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.618402 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49d7637b-85ec-47c7-bf42-a55aa5a8b8dd-config-data\") pod \"barbican-worker-d6d6cc49c-6q5wj\" (UID: \"49d7637b-85ec-47c7-bf42-a55aa5a8b8dd\") " pod="swift-kuttl-tests/barbican-worker-d6d6cc49c-6q5wj" Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.669217 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/barbican-api-6b47c5b866-9jg8k"] Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.671103 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/barbican-api-6b47c5b866-9jg8k" Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.673239 4753 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"barbican-api-config-data" Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.691890 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/barbican-api-6b47c5b866-9jg8k"] Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.720091 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852-config-data-custom\") pod \"barbican-keystone-listener-55fbcf8568-mtgrc\" (UID: \"3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852\") " pod="swift-kuttl-tests/barbican-keystone-listener-55fbcf8568-mtgrc" Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.720160 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9l6b4\" (UniqueName: \"kubernetes.io/projected/3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852-kube-api-access-9l6b4\") pod \"barbican-keystone-listener-55fbcf8568-mtgrc\" (UID: \"3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852\") " pod="swift-kuttl-tests/barbican-keystone-listener-55fbcf8568-mtgrc" Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.720214 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852-logs\") pod \"barbican-keystone-listener-55fbcf8568-mtgrc\" (UID: \"3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852\") " pod="swift-kuttl-tests/barbican-keystone-listener-55fbcf8568-mtgrc" Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.720271 4753 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/49d7637b-85ec-47c7-bf42-a55aa5a8b8dd-logs\") pod \"barbican-worker-d6d6cc49c-6q5wj\" (UID: \"49d7637b-85ec-47c7-bf42-a55aa5a8b8dd\") " pod="swift-kuttl-tests/barbican-worker-d6d6cc49c-6q5wj" Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.720330 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/49d7637b-85ec-47c7-bf42-a55aa5a8b8dd-config-data-custom\") pod \"barbican-worker-d6d6cc49c-6q5wj\" (UID: \"49d7637b-85ec-47c7-bf42-a55aa5a8b8dd\") " pod="swift-kuttl-tests/barbican-worker-d6d6cc49c-6q5wj" Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.720387 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852-config-data\") pod \"barbican-keystone-listener-55fbcf8568-mtgrc\" (UID: \"3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852\") " pod="swift-kuttl-tests/barbican-keystone-listener-55fbcf8568-mtgrc" Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.720409 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49d7637b-85ec-47c7-bf42-a55aa5a8b8dd-config-data\") pod \"barbican-worker-d6d6cc49c-6q5wj\" (UID: \"49d7637b-85ec-47c7-bf42-a55aa5a8b8dd\") " pod="swift-kuttl-tests/barbican-worker-d6d6cc49c-6q5wj" Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.720489 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8dvtp\" (UniqueName: \"kubernetes.io/projected/49d7637b-85ec-47c7-bf42-a55aa5a8b8dd-kube-api-access-8dvtp\") pod \"barbican-worker-d6d6cc49c-6q5wj\" (UID: \"49d7637b-85ec-47c7-bf42-a55aa5a8b8dd\") " pod="swift-kuttl-tests/barbican-worker-d6d6cc49c-6q5wj" Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.721096 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852-logs\") pod \"barbican-keystone-listener-55fbcf8568-mtgrc\" (UID: \"3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852\") " pod="swift-kuttl-tests/barbican-keystone-listener-55fbcf8568-mtgrc" Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.721250 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/49d7637b-85ec-47c7-bf42-a55aa5a8b8dd-logs\") pod \"barbican-worker-d6d6cc49c-6q5wj\" (UID: \"49d7637b-85ec-47c7-bf42-a55aa5a8b8dd\") " pod="swift-kuttl-tests/barbican-worker-d6d6cc49c-6q5wj" Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.727037 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49d7637b-85ec-47c7-bf42-a55aa5a8b8dd-config-data\") pod \"barbican-worker-d6d6cc49c-6q5wj\" (UID: \"49d7637b-85ec-47c7-bf42-a55aa5a8b8dd\") " pod="swift-kuttl-tests/barbican-worker-d6d6cc49c-6q5wj" Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.727567 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/49d7637b-85ec-47c7-bf42-a55aa5a8b8dd-config-data-custom\") pod \"barbican-worker-d6d6cc49c-6q5wj\" (UID: \"49d7637b-85ec-47c7-bf42-a55aa5a8b8dd\") " pod="swift-kuttl-tests/barbican-worker-d6d6cc49c-6q5wj" Jan 29 12:27:04 crc kubenswrapper[4753]: 
I0129 12:27:04.729448 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852-config-data-custom\") pod \"barbican-keystone-listener-55fbcf8568-mtgrc\" (UID: \"3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852\") " pod="swift-kuttl-tests/barbican-keystone-listener-55fbcf8568-mtgrc" Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.732739 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852-config-data\") pod \"barbican-keystone-listener-55fbcf8568-mtgrc\" (UID: \"3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852\") " pod="swift-kuttl-tests/barbican-keystone-listener-55fbcf8568-mtgrc" Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.757639 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8dvtp\" (UniqueName: \"kubernetes.io/projected/49d7637b-85ec-47c7-bf42-a55aa5a8b8dd-kube-api-access-8dvtp\") pod \"barbican-worker-d6d6cc49c-6q5wj\" (UID: \"49d7637b-85ec-47c7-bf42-a55aa5a8b8dd\") " pod="swift-kuttl-tests/barbican-worker-d6d6cc49c-6q5wj" Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.765894 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9l6b4\" (UniqueName: \"kubernetes.io/projected/3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852-kube-api-access-9l6b4\") pod \"barbican-keystone-listener-55fbcf8568-mtgrc\" (UID: \"3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852\") " pod="swift-kuttl-tests/barbican-keystone-listener-55fbcf8568-mtgrc" Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.822412 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1-logs\") pod \"barbican-api-6b47c5b866-9jg8k\" (UID: \"ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1\") " pod="swift-kuttl-tests/barbican-api-6b47c5b866-9jg8k" Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.823095 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1-config-data-custom\") pod \"barbican-api-6b47c5b866-9jg8k\" (UID: \"ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1\") " pod="swift-kuttl-tests/barbican-api-6b47c5b866-9jg8k" Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.823192 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7994k\" (UniqueName: \"kubernetes.io/projected/ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1-kube-api-access-7994k\") pod \"barbican-api-6b47c5b866-9jg8k\" (UID: \"ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1\") " pod="swift-kuttl-tests/barbican-api-6b47c5b866-9jg8k" Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.823286 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1-config-data\") pod \"barbican-api-6b47c5b866-9jg8k\" (UID: \"ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1\") " pod="swift-kuttl-tests/barbican-api-6b47c5b866-9jg8k" Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.837032 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/barbican-keystone-listener-55fbcf8568-mtgrc" Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.925028 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1-logs\") pod \"barbican-api-6b47c5b866-9jg8k\" (UID: \"ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1\") " pod="swift-kuttl-tests/barbican-api-6b47c5b866-9jg8k" Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.925126 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1-config-data-custom\") pod \"barbican-api-6b47c5b866-9jg8k\" (UID: \"ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1\") " pod="swift-kuttl-tests/barbican-api-6b47c5b866-9jg8k" Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.925208 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7994k\" (UniqueName: \"kubernetes.io/projected/ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1-kube-api-access-7994k\") pod \"barbican-api-6b47c5b866-9jg8k\" (UID: \"ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1\") " pod="swift-kuttl-tests/barbican-api-6b47c5b866-9jg8k" Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.925277 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1-config-data\") pod \"barbican-api-6b47c5b866-9jg8k\" (UID: \"ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1\") " pod="swift-kuttl-tests/barbican-api-6b47c5b866-9jg8k" Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.926993 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1-logs\") pod \"barbican-api-6b47c5b866-9jg8k\" (UID: \"ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1\") " pod="swift-kuttl-tests/barbican-api-6b47c5b866-9jg8k" Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.931458 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1-config-data\") pod \"barbican-api-6b47c5b866-9jg8k\" (UID: \"ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1\") " pod="swift-kuttl-tests/barbican-api-6b47c5b866-9jg8k" Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.933135 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1-config-data-custom\") pod \"barbican-api-6b47c5b866-9jg8k\" (UID: \"ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1\") " pod="swift-kuttl-tests/barbican-api-6b47c5b866-9jg8k" Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.944453 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7994k\" (UniqueName: \"kubernetes.io/projected/ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1-kube-api-access-7994k\") pod \"barbican-api-6b47c5b866-9jg8k\" (UID: \"ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1\") " pod="swift-kuttl-tests/barbican-api-6b47c5b866-9jg8k" Jan 29 12:27:04 crc kubenswrapper[4753]: I0129 12:27:04.986731 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/barbican-api-6b47c5b866-9jg8k" Jan 29 12:27:05 crc kubenswrapper[4753]: I0129 12:27:05.043474 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/barbican-worker-d6d6cc49c-6q5wj" Jan 29 12:27:05 crc kubenswrapper[4753]: I0129 12:27:05.380854 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/barbican-keystone-listener-55fbcf8568-mtgrc"] Jan 29 12:27:05 crc kubenswrapper[4753]: I0129 12:27:05.386012 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-f8bdff7d9-wjc8b"] Jan 29 12:27:05 crc kubenswrapper[4753]: I0129 12:27:05.390119 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-f8bdff7d9-wjc8b" Jan 29 12:27:05 crc kubenswrapper[4753]: I0129 12:27:05.398343 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-7w7b8" Jan 29 12:27:05 crc kubenswrapper[4753]: I0129 12:27:05.398531 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-service-cert" Jan 29 12:27:05 crc kubenswrapper[4753]: I0129 12:27:05.426692 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-f8bdff7d9-wjc8b"] Jan 29 12:27:05 crc kubenswrapper[4753]: I0129 12:27:05.448654 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/barbican-worker-d6d6cc49c-6q5wj"] Jan 29 12:27:05 crc kubenswrapper[4753]: W0129 12:27:05.454713 4753 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod49d7637b_85ec_47c7_bf42_a55aa5a8b8dd.slice/crio-04e7228e9d6f786fa8e128a6d95d50d9703b866084b686b4f155547cc8d1557e WatchSource:0}: Error finding container 04e7228e9d6f786fa8e128a6d95d50d9703b866084b686b4f155547cc8d1557e: Status 404 returned error can't find the container with id 04e7228e9d6f786fa8e128a6d95d50d9703b866084b686b4f155547cc8d1557e Jan 29 12:27:05 crc kubenswrapper[4753]: I0129 12:27:05.512686 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/barbican-api-6b47c5b866-9jg8k"] Jan 29 12:27:05 crc kubenswrapper[4753]: I0129 12:27:05.535575 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f49f1749-fe9d-4a18-b81c-65ab628d882e-webhook-cert\") pod \"swift-operator-controller-manager-f8bdff7d9-wjc8b\" (UID: \"f49f1749-fe9d-4a18-b81c-65ab628d882e\") " pod="openstack-operators/swift-operator-controller-manager-f8bdff7d9-wjc8b" Jan 29 12:27:05 crc kubenswrapper[4753]: I0129 12:27:05.535726 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cfpfv\" (UniqueName: \"kubernetes.io/projected/f49f1749-fe9d-4a18-b81c-65ab628d882e-kube-api-access-cfpfv\") pod \"swift-operator-controller-manager-f8bdff7d9-wjc8b\" (UID: \"f49f1749-fe9d-4a18-b81c-65ab628d882e\") " pod="openstack-operators/swift-operator-controller-manager-f8bdff7d9-wjc8b" Jan 29 12:27:05 crc kubenswrapper[4753]: I0129 12:27:05.535761 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f49f1749-fe9d-4a18-b81c-65ab628d882e-apiservice-cert\") pod \"swift-operator-controller-manager-f8bdff7d9-wjc8b\" (UID: \"f49f1749-fe9d-4a18-b81c-65ab628d882e\") " pod="openstack-operators/swift-operator-controller-manager-f8bdff7d9-wjc8b" Jan 
29 12:27:05 crc kubenswrapper[4753]: I0129 12:27:05.637688 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cfpfv\" (UniqueName: \"kubernetes.io/projected/f49f1749-fe9d-4a18-b81c-65ab628d882e-kube-api-access-cfpfv\") pod \"swift-operator-controller-manager-f8bdff7d9-wjc8b\" (UID: \"f49f1749-fe9d-4a18-b81c-65ab628d882e\") " pod="openstack-operators/swift-operator-controller-manager-f8bdff7d9-wjc8b" Jan 29 12:27:05 crc kubenswrapper[4753]: I0129 12:27:05.637761 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f49f1749-fe9d-4a18-b81c-65ab628d882e-apiservice-cert\") pod \"swift-operator-controller-manager-f8bdff7d9-wjc8b\" (UID: \"f49f1749-fe9d-4a18-b81c-65ab628d882e\") " pod="openstack-operators/swift-operator-controller-manager-f8bdff7d9-wjc8b" Jan 29 12:27:05 crc kubenswrapper[4753]: I0129 12:27:05.637860 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f49f1749-fe9d-4a18-b81c-65ab628d882e-webhook-cert\") pod \"swift-operator-controller-manager-f8bdff7d9-wjc8b\" (UID: \"f49f1749-fe9d-4a18-b81c-65ab628d882e\") " pod="openstack-operators/swift-operator-controller-manager-f8bdff7d9-wjc8b" Jan 29 12:27:05 crc kubenswrapper[4753]: I0129 12:27:05.644251 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f49f1749-fe9d-4a18-b81c-65ab628d882e-apiservice-cert\") pod \"swift-operator-controller-manager-f8bdff7d9-wjc8b\" (UID: \"f49f1749-fe9d-4a18-b81c-65ab628d882e\") " pod="openstack-operators/swift-operator-controller-manager-f8bdff7d9-wjc8b" Jan 29 12:27:05 crc kubenswrapper[4753]: I0129 12:27:05.644534 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f49f1749-fe9d-4a18-b81c-65ab628d882e-webhook-cert\") pod \"swift-operator-controller-manager-f8bdff7d9-wjc8b\" (UID: \"f49f1749-fe9d-4a18-b81c-65ab628d882e\") " pod="openstack-operators/swift-operator-controller-manager-f8bdff7d9-wjc8b" Jan 29 12:27:05 crc kubenswrapper[4753]: I0129 12:27:05.656829 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cfpfv\" (UniqueName: \"kubernetes.io/projected/f49f1749-fe9d-4a18-b81c-65ab628d882e-kube-api-access-cfpfv\") pod \"swift-operator-controller-manager-f8bdff7d9-wjc8b\" (UID: \"f49f1749-fe9d-4a18-b81c-65ab628d882e\") " pod="openstack-operators/swift-operator-controller-manager-f8bdff7d9-wjc8b" Jan 29 12:27:05 crc kubenswrapper[4753]: I0129 12:27:05.714516 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-f8bdff7d9-wjc8b" Jan 29 12:27:06 crc kubenswrapper[4753]: I0129 12:27:06.092600 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/barbican-api-6b47c5b866-9jg8k" event={"ID":"ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1","Type":"ContainerStarted","Data":"f5db0ea18c845c065ed6db5307b995c2d28fb39b39c926836dba6ea02f233fcc"} Jan 29 12:27:06 crc kubenswrapper[4753]: I0129 12:27:06.093053 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/barbican-api-6b47c5b866-9jg8k" event={"ID":"ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1","Type":"ContainerStarted","Data":"62c7539c2fe6cfc8d7dc37561f9c1101de0457fb66f73fdf92cfc51c43d3c6e2"} Jan 29 12:27:06 crc kubenswrapper[4753]: I0129 12:27:06.093080 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/barbican-api-6b47c5b866-9jg8k" event={"ID":"ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1","Type":"ContainerStarted","Data":"9da35642687773f59fcbf13d7a9bc82da7ccb97e310ac00f086c7aa70b652465"} Jan 29 12:27:06 crc kubenswrapper[4753]: I0129 12:27:06.093197 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="swift-kuttl-tests/barbican-api-6b47c5b866-9jg8k" Jan 29 12:27:06 crc kubenswrapper[4753]: I0129 12:27:06.094880 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/barbican-worker-d6d6cc49c-6q5wj" event={"ID":"49d7637b-85ec-47c7-bf42-a55aa5a8b8dd","Type":"ContainerStarted","Data":"04e7228e9d6f786fa8e128a6d95d50d9703b866084b686b4f155547cc8d1557e"} Jan 29 12:27:06 crc kubenswrapper[4753]: I0129 12:27:06.098070 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/barbican-keystone-listener-55fbcf8568-mtgrc" event={"ID":"3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852","Type":"ContainerStarted","Data":"4249fde024f27500b7c32ec094d162b44af9829f8398d9e7dd23e1a540472c6c"} Jan 29 12:27:06 crc kubenswrapper[4753]: I0129 12:27:06.124439 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="swift-kuttl-tests/barbican-api-6b47c5b866-9jg8k" podStartSLOduration=2.124408368 podStartE2EDuration="2.124408368s" podCreationTimestamp="2026-01-29 12:27:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:27:06.119851372 +0000 UTC m=+1240.371932827" watchObservedRunningTime="2026-01-29 12:27:06.124408368 +0000 UTC m=+1240.376489823" Jan 29 12:27:06 crc kubenswrapper[4753]: I0129 12:27:06.356360 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-f8bdff7d9-wjc8b"] Jan 29 12:27:07 crc kubenswrapper[4753]: I0129 12:27:07.113664 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-f8bdff7d9-wjc8b" event={"ID":"f49f1749-fe9d-4a18-b81c-65ab628d882e","Type":"ContainerStarted","Data":"4586156451ae70cb31c219357d05eb97ecab418c2110ff017dc70aee04addecc"} Jan 29 12:27:07 crc kubenswrapper[4753]: I0129 12:27:07.113732 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="swift-kuttl-tests/barbican-api-6b47c5b866-9jg8k" Jan 29 12:27:11 crc kubenswrapper[4753]: I0129 12:27:11.151007 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/barbican-worker-d6d6cc49c-6q5wj" event={"ID":"49d7637b-85ec-47c7-bf42-a55aa5a8b8dd","Type":"ContainerStarted","Data":"c62784be0ecc6f613d61e3d10582c6f028f1c1dea1033542d99572833441d2cd"} 
Jan 29 12:27:11 crc kubenswrapper[4753]: I0129 12:27:11.151676 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/barbican-worker-d6d6cc49c-6q5wj" event={"ID":"49d7637b-85ec-47c7-bf42-a55aa5a8b8dd","Type":"ContainerStarted","Data":"fc8fc10b9da2aaaf37221ef3ec40f59ab9e07800d4b88fe042c4833f0e2810c5"} Jan 29 12:27:11 crc kubenswrapper[4753]: I0129 12:27:11.155352 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-f8bdff7d9-wjc8b" event={"ID":"f49f1749-fe9d-4a18-b81c-65ab628d882e","Type":"ContainerStarted","Data":"ec4fcea80b360bf897b1acf454663f063df4e89ac871de1e919e9cbb26ef50b8"} Jan 29 12:27:11 crc kubenswrapper[4753]: I0129 12:27:11.156281 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-f8bdff7d9-wjc8b" Jan 29 12:27:11 crc kubenswrapper[4753]: I0129 12:27:11.158623 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/barbican-keystone-listener-55fbcf8568-mtgrc" event={"ID":"3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852","Type":"ContainerStarted","Data":"1991d457dadef2edd40ec30edf373e24eb3017e81ed995747af5afd3c5855f98"} Jan 29 12:27:11 crc kubenswrapper[4753]: I0129 12:27:11.158672 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/barbican-keystone-listener-55fbcf8568-mtgrc" event={"ID":"3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852","Type":"ContainerStarted","Data":"6d0de795d293a9496cc57597275c0e8e4aac8929167336a966effd96d47af5b8"} Jan 29 12:27:11 crc kubenswrapper[4753]: I0129 12:27:11.203440 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="swift-kuttl-tests/barbican-worker-d6d6cc49c-6q5wj" podStartSLOduration=2.832936131 podStartE2EDuration="7.203388738s" podCreationTimestamp="2026-01-29 12:27:04 +0000 UTC" firstStartedPulling="2026-01-29 12:27:05.456597023 +0000 UTC m=+1239.708678478" lastFinishedPulling="2026-01-29 12:27:09.82704963 +0000 UTC m=+1244.079131085" observedRunningTime="2026-01-29 12:27:11.176262726 +0000 UTC m=+1245.428344201" watchObservedRunningTime="2026-01-29 12:27:11.203388738 +0000 UTC m=+1245.455470203" Jan 29 12:27:11 crc kubenswrapper[4753]: I0129 12:27:11.208817 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="swift-kuttl-tests/barbican-keystone-listener-55fbcf8568-mtgrc" podStartSLOduration=2.790337081 podStartE2EDuration="7.208795249s" podCreationTimestamp="2026-01-29 12:27:04 +0000 UTC" firstStartedPulling="2026-01-29 12:27:05.383990899 +0000 UTC m=+1239.636072354" lastFinishedPulling="2026-01-29 12:27:09.802449067 +0000 UTC m=+1244.054530522" observedRunningTime="2026-01-29 12:27:11.203705137 +0000 UTC m=+1245.455786592" watchObservedRunningTime="2026-01-29 12:27:11.208795249 +0000 UTC m=+1245.460876704" Jan 29 12:27:11 crc kubenswrapper[4753]: I0129 12:27:11.228925 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-f8bdff7d9-wjc8b" podStartSLOduration=2.798528358 podStartE2EDuration="6.228898466s" podCreationTimestamp="2026-01-29 12:27:05 +0000 UTC" firstStartedPulling="2026-01-29 12:27:06.366523385 +0000 UTC m=+1240.618604830" lastFinishedPulling="2026-01-29 12:27:09.796893483 +0000 UTC m=+1244.048974938" observedRunningTime="2026-01-29 12:27:11.224750311 +0000 UTC m=+1245.476831766" watchObservedRunningTime="2026-01-29 12:27:11.228898466 +0000 UTC m=+1245.480979921" Jan 29 12:27:15 crc kubenswrapper[4753]: I0129 
12:27:15.837852 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-f8bdff7d9-wjc8b" Jan 29 12:27:16 crc kubenswrapper[4753]: I0129 12:27:16.668101 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="swift-kuttl-tests/barbican-api-6b47c5b866-9jg8k" Jan 29 12:27:16 crc kubenswrapper[4753]: I0129 12:27:16.758990 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="swift-kuttl-tests/barbican-api-6b47c5b866-9jg8k" Jan 29 12:27:31 crc kubenswrapper[4753]: I0129 12:27:31.909472 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/swift-storage-0"] Jan 29 12:27:31 crc kubenswrapper[4753]: I0129 12:27:31.920693 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:27:31 crc kubenswrapper[4753]: I0129 12:27:31.926213 4753 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"swift-conf" Jan 29 12:27:31 crc kubenswrapper[4753]: I0129 12:27:31.926213 4753 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"swift-swift-dockercfg-bdq87" Jan 29 12:27:31 crc kubenswrapper[4753]: I0129 12:27:31.926767 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"swift-kuttl-tests"/"swift-ring-files" Jan 29 12:27:31 crc kubenswrapper[4753]: I0129 12:27:31.927565 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"swift-kuttl-tests"/"swift-storage-config-data" Jan 29 12:27:31 crc kubenswrapper[4753]: I0129 12:27:31.948838 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-storage-0"] Jan 29 12:27:31 crc kubenswrapper[4753]: I0129 12:27:31.956491 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-slxbn"] Jan 29 12:27:31 crc kubenswrapper[4753]: I0129 12:27:31.957698 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-slxbn" Jan 29 12:27:31 crc kubenswrapper[4753]: I0129 12:27:31.960365 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"swift-kuttl-tests"/"swift-ring-scripts" Jan 29 12:27:31 crc kubenswrapper[4753]: I0129 12:27:31.960420 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"swift-kuttl-tests"/"swift-ring-config-data" Jan 29 12:27:31 crc kubenswrapper[4753]: I0129 12:27:31.960697 4753 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"swift-proxy-config-data" Jan 29 12:27:31 crc kubenswrapper[4753]: I0129 12:27:31.970905 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-slxbn"] Jan 29 12:27:32 crc kubenswrapper[4753]: I0129 12:27:32.037360 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/39385d87-5895-47b3-8fa4-dff5549bca97-etc-swift\") pod \"swift-storage-0\" (UID: \"39385d87-5895-47b3-8fa4-dff5549bca97\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:27:32 crc kubenswrapper[4753]: I0129 12:27:32.037514 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/39385d87-5895-47b3-8fa4-dff5549bca97-lock\") pod \"swift-storage-0\" (UID: \"39385d87-5895-47b3-8fa4-dff5549bca97\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:27:32 crc kubenswrapper[4753]: I0129 12:27:32.037582 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"swift-storage-0\" (UID: \"39385d87-5895-47b3-8fa4-dff5549bca97\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:27:32 crc kubenswrapper[4753]: I0129 12:27:32.037625 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-llvjn\" (UniqueName: \"kubernetes.io/projected/39385d87-5895-47b3-8fa4-dff5549bca97-kube-api-access-llvjn\") pod \"swift-storage-0\" (UID: \"39385d87-5895-47b3-8fa4-dff5549bca97\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:27:32 crc kubenswrapper[4753]: I0129 12:27:32.037652 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/39385d87-5895-47b3-8fa4-dff5549bca97-cache\") pod \"swift-storage-0\" (UID: \"39385d87-5895-47b3-8fa4-dff5549bca97\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:27:32 crc kubenswrapper[4753]: I0129 12:27:32.139453 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-llvjn\" (UniqueName: \"kubernetes.io/projected/39385d87-5895-47b3-8fa4-dff5549bca97-kube-api-access-llvjn\") pod \"swift-storage-0\" (UID: \"39385d87-5895-47b3-8fa4-dff5549bca97\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:27:32 crc kubenswrapper[4753]: I0129 12:27:32.139539 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a925aaa3-40e0-4915-a43d-31c3294b4cbb-scripts\") pod \"swift-ring-rebalance-slxbn\" (UID: \"a925aaa3-40e0-4915-a43d-31c3294b4cbb\") " pod="swift-kuttl-tests/swift-ring-rebalance-slxbn" Jan 29 12:27:32 crc kubenswrapper[4753]: I0129 12:27:32.139569 4753 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/39385d87-5895-47b3-8fa4-dff5549bca97-cache\") pod \"swift-storage-0\" (UID: \"39385d87-5895-47b3-8fa4-dff5549bca97\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:27:32 crc kubenswrapper[4753]: I0129 12:27:32.139597 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/a925aaa3-40e0-4915-a43d-31c3294b4cbb-dispersionconf\") pod \"swift-ring-rebalance-slxbn\" (UID: \"a925aaa3-40e0-4915-a43d-31c3294b4cbb\") " pod="swift-kuttl-tests/swift-ring-rebalance-slxbn" Jan 29 12:27:32 crc kubenswrapper[4753]: I0129 12:27:32.139621 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/a925aaa3-40e0-4915-a43d-31c3294b4cbb-ring-data-devices\") pod \"swift-ring-rebalance-slxbn\" (UID: \"a925aaa3-40e0-4915-a43d-31c3294b4cbb\") " pod="swift-kuttl-tests/swift-ring-rebalance-slxbn" Jan 29 12:27:32 crc kubenswrapper[4753]: I0129 12:27:32.139646 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c765s\" (UniqueName: \"kubernetes.io/projected/a925aaa3-40e0-4915-a43d-31c3294b4cbb-kube-api-access-c765s\") pod \"swift-ring-rebalance-slxbn\" (UID: \"a925aaa3-40e0-4915-a43d-31c3294b4cbb\") " pod="swift-kuttl-tests/swift-ring-rebalance-slxbn" Jan 29 12:27:32 crc kubenswrapper[4753]: I0129 12:27:32.139691 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/a925aaa3-40e0-4915-a43d-31c3294b4cbb-swiftconf\") pod \"swift-ring-rebalance-slxbn\" (UID: \"a925aaa3-40e0-4915-a43d-31c3294b4cbb\") " pod="swift-kuttl-tests/swift-ring-rebalance-slxbn" Jan 29 12:27:32 crc kubenswrapper[4753]: I0129 12:27:32.139726 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/a925aaa3-40e0-4915-a43d-31c3294b4cbb-etc-swift\") pod \"swift-ring-rebalance-slxbn\" (UID: \"a925aaa3-40e0-4915-a43d-31c3294b4cbb\") " pod="swift-kuttl-tests/swift-ring-rebalance-slxbn" Jan 29 12:27:32 crc kubenswrapper[4753]: I0129 12:27:32.139746 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/39385d87-5895-47b3-8fa4-dff5549bca97-etc-swift\") pod \"swift-storage-0\" (UID: \"39385d87-5895-47b3-8fa4-dff5549bca97\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:27:32 crc kubenswrapper[4753]: I0129 12:27:32.139832 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/39385d87-5895-47b3-8fa4-dff5549bca97-lock\") pod \"swift-storage-0\" (UID: \"39385d87-5895-47b3-8fa4-dff5549bca97\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:27:32 crc kubenswrapper[4753]: I0129 12:27:32.139879 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"swift-storage-0\" (UID: \"39385d87-5895-47b3-8fa4-dff5549bca97\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:27:32 crc kubenswrapper[4753]: E0129 12:27:32.140195 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not 
found Jan 29 12:27:32 crc kubenswrapper[4753]: E0129 12:27:32.140344 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-storage-0: configmap "swift-ring-files" not found Jan 29 12:27:32 crc kubenswrapper[4753]: I0129 12:27:32.140421 4753 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"swift-storage-0\" (UID: \"39385d87-5895-47b3-8fa4-dff5549bca97\") device mount path \"/mnt/openstack/pv11\"" pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:27:32 crc kubenswrapper[4753]: E0129 12:27:32.140552 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/39385d87-5895-47b3-8fa4-dff5549bca97-etc-swift podName:39385d87-5895-47b3-8fa4-dff5549bca97 nodeName:}" failed. No retries permitted until 2026-01-29 12:27:32.640433018 +0000 UTC m=+1266.892514483 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/39385d87-5895-47b3-8fa4-dff5549bca97-etc-swift") pod "swift-storage-0" (UID: "39385d87-5895-47b3-8fa4-dff5549bca97") : configmap "swift-ring-files" not found Jan 29 12:27:32 crc kubenswrapper[4753]: I0129 12:27:32.140594 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/39385d87-5895-47b3-8fa4-dff5549bca97-lock\") pod \"swift-storage-0\" (UID: \"39385d87-5895-47b3-8fa4-dff5549bca97\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:27:32 crc kubenswrapper[4753]: I0129 12:27:32.140297 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/39385d87-5895-47b3-8fa4-dff5549bca97-cache\") pod \"swift-storage-0\" (UID: \"39385d87-5895-47b3-8fa4-dff5549bca97\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:27:32 crc kubenswrapper[4753]: I0129 12:27:32.165838 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-llvjn\" (UniqueName: \"kubernetes.io/projected/39385d87-5895-47b3-8fa4-dff5549bca97-kube-api-access-llvjn\") pod \"swift-storage-0\" (UID: \"39385d87-5895-47b3-8fa4-dff5549bca97\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:27:32 crc kubenswrapper[4753]: I0129 12:27:32.170893 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"swift-storage-0\" (UID: \"39385d87-5895-47b3-8fa4-dff5549bca97\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:27:32 crc kubenswrapper[4753]: I0129 12:27:32.241597 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/a925aaa3-40e0-4915-a43d-31c3294b4cbb-etc-swift\") pod \"swift-ring-rebalance-slxbn\" (UID: \"a925aaa3-40e0-4915-a43d-31c3294b4cbb\") " pod="swift-kuttl-tests/swift-ring-rebalance-slxbn" Jan 29 12:27:32 crc kubenswrapper[4753]: I0129 12:27:32.241850 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a925aaa3-40e0-4915-a43d-31c3294b4cbb-scripts\") pod \"swift-ring-rebalance-slxbn\" (UID: \"a925aaa3-40e0-4915-a43d-31c3294b4cbb\") " pod="swift-kuttl-tests/swift-ring-rebalance-slxbn" Jan 29 12:27:32 crc kubenswrapper[4753]: I0129 12:27:32.241885 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"dispersionconf\" (UniqueName: \"kubernetes.io/secret/a925aaa3-40e0-4915-a43d-31c3294b4cbb-dispersionconf\") pod \"swift-ring-rebalance-slxbn\" (UID: \"a925aaa3-40e0-4915-a43d-31c3294b4cbb\") " pod="swift-kuttl-tests/swift-ring-rebalance-slxbn" Jan 29 12:27:32 crc kubenswrapper[4753]: I0129 12:27:32.241925 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/a925aaa3-40e0-4915-a43d-31c3294b4cbb-ring-data-devices\") pod \"swift-ring-rebalance-slxbn\" (UID: \"a925aaa3-40e0-4915-a43d-31c3294b4cbb\") " pod="swift-kuttl-tests/swift-ring-rebalance-slxbn" Jan 29 12:27:32 crc kubenswrapper[4753]: I0129 12:27:32.241952 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c765s\" (UniqueName: \"kubernetes.io/projected/a925aaa3-40e0-4915-a43d-31c3294b4cbb-kube-api-access-c765s\") pod \"swift-ring-rebalance-slxbn\" (UID: \"a925aaa3-40e0-4915-a43d-31c3294b4cbb\") " pod="swift-kuttl-tests/swift-ring-rebalance-slxbn" Jan 29 12:27:32 crc kubenswrapper[4753]: I0129 12:27:32.241990 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/a925aaa3-40e0-4915-a43d-31c3294b4cbb-swiftconf\") pod \"swift-ring-rebalance-slxbn\" (UID: \"a925aaa3-40e0-4915-a43d-31c3294b4cbb\") " pod="swift-kuttl-tests/swift-ring-rebalance-slxbn" Jan 29 12:27:32 crc kubenswrapper[4753]: I0129 12:27:32.242151 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/a925aaa3-40e0-4915-a43d-31c3294b4cbb-etc-swift\") pod \"swift-ring-rebalance-slxbn\" (UID: \"a925aaa3-40e0-4915-a43d-31c3294b4cbb\") " pod="swift-kuttl-tests/swift-ring-rebalance-slxbn" Jan 29 12:27:32 crc kubenswrapper[4753]: I0129 12:27:32.243040 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a925aaa3-40e0-4915-a43d-31c3294b4cbb-scripts\") pod \"swift-ring-rebalance-slxbn\" (UID: \"a925aaa3-40e0-4915-a43d-31c3294b4cbb\") " pod="swift-kuttl-tests/swift-ring-rebalance-slxbn" Jan 29 12:27:32 crc kubenswrapper[4753]: I0129 12:27:32.243051 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/a925aaa3-40e0-4915-a43d-31c3294b4cbb-ring-data-devices\") pod \"swift-ring-rebalance-slxbn\" (UID: \"a925aaa3-40e0-4915-a43d-31c3294b4cbb\") " pod="swift-kuttl-tests/swift-ring-rebalance-slxbn" Jan 29 12:27:32 crc kubenswrapper[4753]: I0129 12:27:32.245414 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/a925aaa3-40e0-4915-a43d-31c3294b4cbb-dispersionconf\") pod \"swift-ring-rebalance-slxbn\" (UID: \"a925aaa3-40e0-4915-a43d-31c3294b4cbb\") " pod="swift-kuttl-tests/swift-ring-rebalance-slxbn" Jan 29 12:27:32 crc kubenswrapper[4753]: I0129 12:27:32.248475 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/a925aaa3-40e0-4915-a43d-31c3294b4cbb-swiftconf\") pod \"swift-ring-rebalance-slxbn\" (UID: \"a925aaa3-40e0-4915-a43d-31c3294b4cbb\") " pod="swift-kuttl-tests/swift-ring-rebalance-slxbn" Jan 29 12:27:32 crc kubenswrapper[4753]: I0129 12:27:32.263931 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c765s\" (UniqueName: 
\"kubernetes.io/projected/a925aaa3-40e0-4915-a43d-31c3294b4cbb-kube-api-access-c765s\") pod \"swift-ring-rebalance-slxbn\" (UID: \"a925aaa3-40e0-4915-a43d-31c3294b4cbb\") " pod="swift-kuttl-tests/swift-ring-rebalance-slxbn" Jan 29 12:27:32 crc kubenswrapper[4753]: I0129 12:27:32.271761 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-slxbn" Jan 29 12:27:32 crc kubenswrapper[4753]: I0129 12:27:32.496530 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/swift-proxy-6bc96d68cf-vw4t2"] Jan 29 12:27:32 crc kubenswrapper[4753]: I0129 12:27:32.497977 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-vw4t2" Jan 29 12:27:32 crc kubenswrapper[4753]: I0129 12:27:32.528546 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-proxy-6bc96d68cf-vw4t2"] Jan 29 12:27:32 crc kubenswrapper[4753]: I0129 12:27:32.657079 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1279ea7f-350d-4657-8d79-d384e6974700-config-data\") pod \"swift-proxy-6bc96d68cf-vw4t2\" (UID: \"1279ea7f-350d-4657-8d79-d384e6974700\") " pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-vw4t2" Jan 29 12:27:32 crc kubenswrapper[4753]: I0129 12:27:32.657265 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/39385d87-5895-47b3-8fa4-dff5549bca97-etc-swift\") pod \"swift-storage-0\" (UID: \"39385d87-5895-47b3-8fa4-dff5549bca97\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:27:32 crc kubenswrapper[4753]: I0129 12:27:32.657347 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1279ea7f-350d-4657-8d79-d384e6974700-etc-swift\") pod \"swift-proxy-6bc96d68cf-vw4t2\" (UID: \"1279ea7f-350d-4657-8d79-d384e6974700\") " pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-vw4t2" Jan 29 12:27:32 crc kubenswrapper[4753]: I0129 12:27:32.657375 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1279ea7f-350d-4657-8d79-d384e6974700-log-httpd\") pod \"swift-proxy-6bc96d68cf-vw4t2\" (UID: \"1279ea7f-350d-4657-8d79-d384e6974700\") " pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-vw4t2" Jan 29 12:27:32 crc kubenswrapper[4753]: I0129 12:27:32.657450 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rbtfh\" (UniqueName: \"kubernetes.io/projected/1279ea7f-350d-4657-8d79-d384e6974700-kube-api-access-rbtfh\") pod \"swift-proxy-6bc96d68cf-vw4t2\" (UID: \"1279ea7f-350d-4657-8d79-d384e6974700\") " pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-vw4t2" Jan 29 12:27:32 crc kubenswrapper[4753]: I0129 12:27:32.657527 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1279ea7f-350d-4657-8d79-d384e6974700-run-httpd\") pod \"swift-proxy-6bc96d68cf-vw4t2\" (UID: \"1279ea7f-350d-4657-8d79-d384e6974700\") " pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-vw4t2" Jan 29 12:27:32 crc kubenswrapper[4753]: E0129 12:27:32.657901 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found Jan 29 
12:27:32 crc kubenswrapper[4753]: E0129 12:27:32.657922 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-storage-0: configmap "swift-ring-files" not found Jan 29 12:27:32 crc kubenswrapper[4753]: E0129 12:27:32.658004 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/39385d87-5895-47b3-8fa4-dff5549bca97-etc-swift podName:39385d87-5895-47b3-8fa4-dff5549bca97 nodeName:}" failed. No retries permitted until 2026-01-29 12:27:33.657952683 +0000 UTC m=+1267.910034138 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/39385d87-5895-47b3-8fa4-dff5549bca97-etc-swift") pod "swift-storage-0" (UID: "39385d87-5895-47b3-8fa4-dff5549bca97") : configmap "swift-ring-files" not found Jan 29 12:27:32 crc kubenswrapper[4753]: I0129 12:27:32.759479 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1279ea7f-350d-4657-8d79-d384e6974700-config-data\") pod \"swift-proxy-6bc96d68cf-vw4t2\" (UID: \"1279ea7f-350d-4657-8d79-d384e6974700\") " pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-vw4t2" Jan 29 12:27:32 crc kubenswrapper[4753]: I0129 12:27:32.759565 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1279ea7f-350d-4657-8d79-d384e6974700-etc-swift\") pod \"swift-proxy-6bc96d68cf-vw4t2\" (UID: \"1279ea7f-350d-4657-8d79-d384e6974700\") " pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-vw4t2" Jan 29 12:27:32 crc kubenswrapper[4753]: I0129 12:27:32.759602 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1279ea7f-350d-4657-8d79-d384e6974700-log-httpd\") pod \"swift-proxy-6bc96d68cf-vw4t2\" (UID: \"1279ea7f-350d-4657-8d79-d384e6974700\") " pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-vw4t2" Jan 29 12:27:32 crc kubenswrapper[4753]: I0129 12:27:32.759622 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rbtfh\" (UniqueName: \"kubernetes.io/projected/1279ea7f-350d-4657-8d79-d384e6974700-kube-api-access-rbtfh\") pod \"swift-proxy-6bc96d68cf-vw4t2\" (UID: \"1279ea7f-350d-4657-8d79-d384e6974700\") " pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-vw4t2" Jan 29 12:27:32 crc kubenswrapper[4753]: I0129 12:27:32.759651 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1279ea7f-350d-4657-8d79-d384e6974700-run-httpd\") pod \"swift-proxy-6bc96d68cf-vw4t2\" (UID: \"1279ea7f-350d-4657-8d79-d384e6974700\") " pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-vw4t2" Jan 29 12:27:32 crc kubenswrapper[4753]: E0129 12:27:32.760146 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found Jan 29 12:27:32 crc kubenswrapper[4753]: E0129 12:27:32.760202 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-proxy-6bc96d68cf-vw4t2: configmap "swift-ring-files" not found Jan 29 12:27:32 crc kubenswrapper[4753]: E0129 12:27:32.760293 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/1279ea7f-350d-4657-8d79-d384e6974700-etc-swift podName:1279ea7f-350d-4657-8d79-d384e6974700 nodeName:}" failed. 
No retries permitted until 2026-01-29 12:27:33.260268361 +0000 UTC m=+1267.512349816 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/1279ea7f-350d-4657-8d79-d384e6974700-etc-swift") pod "swift-proxy-6bc96d68cf-vw4t2" (UID: "1279ea7f-350d-4657-8d79-d384e6974700") : configmap "swift-ring-files" not found Jan 29 12:27:32 crc kubenswrapper[4753]: I0129 12:27:32.760505 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1279ea7f-350d-4657-8d79-d384e6974700-log-httpd\") pod \"swift-proxy-6bc96d68cf-vw4t2\" (UID: \"1279ea7f-350d-4657-8d79-d384e6974700\") " pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-vw4t2" Jan 29 12:27:32 crc kubenswrapper[4753]: I0129 12:27:32.761151 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1279ea7f-350d-4657-8d79-d384e6974700-run-httpd\") pod \"swift-proxy-6bc96d68cf-vw4t2\" (UID: \"1279ea7f-350d-4657-8d79-d384e6974700\") " pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-vw4t2" Jan 29 12:27:32 crc kubenswrapper[4753]: I0129 12:27:32.767624 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1279ea7f-350d-4657-8d79-d384e6974700-config-data\") pod \"swift-proxy-6bc96d68cf-vw4t2\" (UID: \"1279ea7f-350d-4657-8d79-d384e6974700\") " pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-vw4t2" Jan 29 12:27:32 crc kubenswrapper[4753]: I0129 12:27:32.789807 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rbtfh\" (UniqueName: \"kubernetes.io/projected/1279ea7f-350d-4657-8d79-d384e6974700-kube-api-access-rbtfh\") pod \"swift-proxy-6bc96d68cf-vw4t2\" (UID: \"1279ea7f-350d-4657-8d79-d384e6974700\") " pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-vw4t2" Jan 29 12:27:32 crc kubenswrapper[4753]: I0129 12:27:32.894296 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-slxbn"] Jan 29 12:27:33 crc kubenswrapper[4753]: I0129 12:27:33.303844 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1279ea7f-350d-4657-8d79-d384e6974700-etc-swift\") pod \"swift-proxy-6bc96d68cf-vw4t2\" (UID: \"1279ea7f-350d-4657-8d79-d384e6974700\") " pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-vw4t2" Jan 29 12:27:33 crc kubenswrapper[4753]: E0129 12:27:33.304176 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found Jan 29 12:27:33 crc kubenswrapper[4753]: E0129 12:27:33.304213 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-proxy-6bc96d68cf-vw4t2: configmap "swift-ring-files" not found Jan 29 12:27:33 crc kubenswrapper[4753]: E0129 12:27:33.304306 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/1279ea7f-350d-4657-8d79-d384e6974700-etc-swift podName:1279ea7f-350d-4657-8d79-d384e6974700 nodeName:}" failed. No retries permitted until 2026-01-29 12:27:34.304283812 +0000 UTC m=+1268.556365267 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/1279ea7f-350d-4657-8d79-d384e6974700-etc-swift") pod "swift-proxy-6bc96d68cf-vw4t2" (UID: "1279ea7f-350d-4657-8d79-d384e6974700") : configmap "swift-ring-files" not found Jan 29 12:27:33 crc kubenswrapper[4753]: I0129 12:27:33.468893 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-ring-rebalance-slxbn" event={"ID":"a925aaa3-40e0-4915-a43d-31c3294b4cbb","Type":"ContainerStarted","Data":"b20ffffd03121f19034afea0ab067bd3b0e3fe8e4f35cb9125e0fdf9f1e3ad2a"} Jan 29 12:27:33 crc kubenswrapper[4753]: I0129 12:27:33.742382 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/39385d87-5895-47b3-8fa4-dff5549bca97-etc-swift\") pod \"swift-storage-0\" (UID: \"39385d87-5895-47b3-8fa4-dff5549bca97\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:27:33 crc kubenswrapper[4753]: E0129 12:27:33.742668 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found Jan 29 12:27:33 crc kubenswrapper[4753]: E0129 12:27:33.742689 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-storage-0: configmap "swift-ring-files" not found Jan 29 12:27:33 crc kubenswrapper[4753]: E0129 12:27:33.742750 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/39385d87-5895-47b3-8fa4-dff5549bca97-etc-swift podName:39385d87-5895-47b3-8fa4-dff5549bca97 nodeName:}" failed. No retries permitted until 2026-01-29 12:27:35.742730555 +0000 UTC m=+1269.994812010 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/39385d87-5895-47b3-8fa4-dff5549bca97-etc-swift") pod "swift-storage-0" (UID: "39385d87-5895-47b3-8fa4-dff5549bca97") : configmap "swift-ring-files" not found Jan 29 12:27:34 crc kubenswrapper[4753]: I0129 12:27:34.351825 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1279ea7f-350d-4657-8d79-d384e6974700-etc-swift\") pod \"swift-proxy-6bc96d68cf-vw4t2\" (UID: \"1279ea7f-350d-4657-8d79-d384e6974700\") " pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-vw4t2" Jan 29 12:27:34 crc kubenswrapper[4753]: E0129 12:27:34.352370 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found Jan 29 12:27:34 crc kubenswrapper[4753]: E0129 12:27:34.352392 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-proxy-6bc96d68cf-vw4t2: configmap "swift-ring-files" not found Jan 29 12:27:34 crc kubenswrapper[4753]: E0129 12:27:34.352452 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/1279ea7f-350d-4657-8d79-d384e6974700-etc-swift podName:1279ea7f-350d-4657-8d79-d384e6974700 nodeName:}" failed. No retries permitted until 2026-01-29 12:27:36.352432488 +0000 UTC m=+1270.604513933 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/1279ea7f-350d-4657-8d79-d384e6974700-etc-swift") pod "swift-proxy-6bc96d68cf-vw4t2" (UID: "1279ea7f-350d-4657-8d79-d384e6974700") : configmap "swift-ring-files" not found Jan 29 12:27:35 crc kubenswrapper[4753]: I0129 12:27:35.823688 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/39385d87-5895-47b3-8fa4-dff5549bca97-etc-swift\") pod \"swift-storage-0\" (UID: \"39385d87-5895-47b3-8fa4-dff5549bca97\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:27:35 crc kubenswrapper[4753]: E0129 12:27:35.823964 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found Jan 29 12:27:35 crc kubenswrapper[4753]: E0129 12:27:35.823990 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-storage-0: configmap "swift-ring-files" not found Jan 29 12:27:35 crc kubenswrapper[4753]: E0129 12:27:35.824040 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/39385d87-5895-47b3-8fa4-dff5549bca97-etc-swift podName:39385d87-5895-47b3-8fa4-dff5549bca97 nodeName:}" failed. No retries permitted until 2026-01-29 12:27:39.824025911 +0000 UTC m=+1274.076107366 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/39385d87-5895-47b3-8fa4-dff5549bca97-etc-swift") pod "swift-storage-0" (UID: "39385d87-5895-47b3-8fa4-dff5549bca97") : configmap "swift-ring-files" not found Jan 29 12:27:36 crc kubenswrapper[4753]: I0129 12:27:36.354182 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1279ea7f-350d-4657-8d79-d384e6974700-etc-swift\") pod \"swift-proxy-6bc96d68cf-vw4t2\" (UID: \"1279ea7f-350d-4657-8d79-d384e6974700\") " pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-vw4t2" Jan 29 12:27:36 crc kubenswrapper[4753]: E0129 12:27:36.354414 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found Jan 29 12:27:36 crc kubenswrapper[4753]: E0129 12:27:36.354445 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-proxy-6bc96d68cf-vw4t2: configmap "swift-ring-files" not found Jan 29 12:27:36 crc kubenswrapper[4753]: E0129 12:27:36.354512 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/1279ea7f-350d-4657-8d79-d384e6974700-etc-swift podName:1279ea7f-350d-4657-8d79-d384e6974700 nodeName:}" failed. No retries permitted until 2026-01-29 12:27:40.354494225 +0000 UTC m=+1274.606575680 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/1279ea7f-350d-4657-8d79-d384e6974700-etc-swift") pod "swift-proxy-6bc96d68cf-vw4t2" (UID: "1279ea7f-350d-4657-8d79-d384e6974700") : configmap "swift-ring-files" not found Jan 29 12:27:37 crc kubenswrapper[4753]: I0129 12:27:37.779189 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-ring-rebalance-slxbn" event={"ID":"a925aaa3-40e0-4915-a43d-31c3294b4cbb","Type":"ContainerStarted","Data":"2bd47ee4d4bdbbab245ac4269049ddfdcc62ea915a2cd8f3521b019a267507cc"} Jan 29 12:27:37 crc kubenswrapper[4753]: I0129 12:27:37.805826 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="swift-kuttl-tests/swift-ring-rebalance-slxbn" podStartSLOduration=3.3183438499999998 podStartE2EDuration="6.805778872s" podCreationTimestamp="2026-01-29 12:27:31 +0000 UTC" firstStartedPulling="2026-01-29 12:27:32.907560337 +0000 UTC m=+1267.159641792" lastFinishedPulling="2026-01-29 12:27:36.394995359 +0000 UTC m=+1270.647076814" observedRunningTime="2026-01-29 12:27:37.800722262 +0000 UTC m=+1272.052803727" watchObservedRunningTime="2026-01-29 12:27:37.805778872 +0000 UTC m=+1272.057860327" Jan 29 12:27:39 crc kubenswrapper[4753]: I0129 12:27:39.921474 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/39385d87-5895-47b3-8fa4-dff5549bca97-etc-swift\") pod \"swift-storage-0\" (UID: \"39385d87-5895-47b3-8fa4-dff5549bca97\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:27:39 crc kubenswrapper[4753]: E0129 12:27:39.921725 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found Jan 29 12:27:39 crc kubenswrapper[4753]: E0129 12:27:39.922978 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-storage-0: configmap "swift-ring-files" not found Jan 29 12:27:39 crc kubenswrapper[4753]: E0129 12:27:39.923071 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/39385d87-5895-47b3-8fa4-dff5549bca97-etc-swift podName:39385d87-5895-47b3-8fa4-dff5549bca97 nodeName:}" failed. No retries permitted until 2026-01-29 12:27:47.923040107 +0000 UTC m=+1282.175121592 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/39385d87-5895-47b3-8fa4-dff5549bca97-etc-swift") pod "swift-storage-0" (UID: "39385d87-5895-47b3-8fa4-dff5549bca97") : configmap "swift-ring-files" not found Jan 29 12:27:40 crc kubenswrapper[4753]: I0129 12:27:40.430438 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1279ea7f-350d-4657-8d79-d384e6974700-etc-swift\") pod \"swift-proxy-6bc96d68cf-vw4t2\" (UID: \"1279ea7f-350d-4657-8d79-d384e6974700\") " pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-vw4t2" Jan 29 12:27:40 crc kubenswrapper[4753]: E0129 12:27:40.430684 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found Jan 29 12:27:40 crc kubenswrapper[4753]: E0129 12:27:40.430720 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-proxy-6bc96d68cf-vw4t2: configmap "swift-ring-files" not found Jan 29 12:27:40 crc kubenswrapper[4753]: E0129 12:27:40.430805 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/1279ea7f-350d-4657-8d79-d384e6974700-etc-swift podName:1279ea7f-350d-4657-8d79-d384e6974700 nodeName:}" failed. No retries permitted until 2026-01-29 12:27:48.430780372 +0000 UTC m=+1282.682861867 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/1279ea7f-350d-4657-8d79-d384e6974700-etc-swift") pod "swift-proxy-6bc96d68cf-vw4t2" (UID: "1279ea7f-350d-4657-8d79-d384e6974700") : configmap "swift-ring-files" not found Jan 29 12:27:44 crc kubenswrapper[4753]: I0129 12:27:44.847338 4753 generic.go:334] "Generic (PLEG): container finished" podID="a925aaa3-40e0-4915-a43d-31c3294b4cbb" containerID="2bd47ee4d4bdbbab245ac4269049ddfdcc62ea915a2cd8f3521b019a267507cc" exitCode=0 Jan 29 12:27:44 crc kubenswrapper[4753]: I0129 12:27:44.847406 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-ring-rebalance-slxbn" event={"ID":"a925aaa3-40e0-4915-a43d-31c3294b4cbb","Type":"ContainerDied","Data":"2bd47ee4d4bdbbab245ac4269049ddfdcc62ea915a2cd8f3521b019a267507cc"} Jan 29 12:27:46 crc kubenswrapper[4753]: I0129 12:27:46.181136 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-slxbn" Jan 29 12:27:46 crc kubenswrapper[4753]: I0129 12:27:46.316120 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c765s\" (UniqueName: \"kubernetes.io/projected/a925aaa3-40e0-4915-a43d-31c3294b4cbb-kube-api-access-c765s\") pod \"a925aaa3-40e0-4915-a43d-31c3294b4cbb\" (UID: \"a925aaa3-40e0-4915-a43d-31c3294b4cbb\") " Jan 29 12:27:46 crc kubenswrapper[4753]: I0129 12:27:46.316514 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/a925aaa3-40e0-4915-a43d-31c3294b4cbb-swiftconf\") pod \"a925aaa3-40e0-4915-a43d-31c3294b4cbb\" (UID: \"a925aaa3-40e0-4915-a43d-31c3294b4cbb\") " Jan 29 12:27:46 crc kubenswrapper[4753]: I0129 12:27:46.316550 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/a925aaa3-40e0-4915-a43d-31c3294b4cbb-ring-data-devices\") pod \"a925aaa3-40e0-4915-a43d-31c3294b4cbb\" (UID: \"a925aaa3-40e0-4915-a43d-31c3294b4cbb\") " Jan 29 12:27:46 crc kubenswrapper[4753]: I0129 12:27:46.316592 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/a925aaa3-40e0-4915-a43d-31c3294b4cbb-dispersionconf\") pod \"a925aaa3-40e0-4915-a43d-31c3294b4cbb\" (UID: \"a925aaa3-40e0-4915-a43d-31c3294b4cbb\") " Jan 29 12:27:46 crc kubenswrapper[4753]: I0129 12:27:46.316612 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/a925aaa3-40e0-4915-a43d-31c3294b4cbb-etc-swift\") pod \"a925aaa3-40e0-4915-a43d-31c3294b4cbb\" (UID: \"a925aaa3-40e0-4915-a43d-31c3294b4cbb\") " Jan 29 12:27:46 crc kubenswrapper[4753]: I0129 12:27:46.316660 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a925aaa3-40e0-4915-a43d-31c3294b4cbb-scripts\") pod \"a925aaa3-40e0-4915-a43d-31c3294b4cbb\" (UID: \"a925aaa3-40e0-4915-a43d-31c3294b4cbb\") " Jan 29 12:27:46 crc kubenswrapper[4753]: I0129 12:27:46.317862 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a925aaa3-40e0-4915-a43d-31c3294b4cbb-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "a925aaa3-40e0-4915-a43d-31c3294b4cbb" (UID: "a925aaa3-40e0-4915-a43d-31c3294b4cbb"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:27:46 crc kubenswrapper[4753]: I0129 12:27:46.318663 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a925aaa3-40e0-4915-a43d-31c3294b4cbb-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "a925aaa3-40e0-4915-a43d-31c3294b4cbb" (UID: "a925aaa3-40e0-4915-a43d-31c3294b4cbb"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:27:46 crc kubenswrapper[4753]: I0129 12:27:46.323698 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a925aaa3-40e0-4915-a43d-31c3294b4cbb-kube-api-access-c765s" (OuterVolumeSpecName: "kube-api-access-c765s") pod "a925aaa3-40e0-4915-a43d-31c3294b4cbb" (UID: "a925aaa3-40e0-4915-a43d-31c3294b4cbb"). InnerVolumeSpecName "kube-api-access-c765s". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:27:46 crc kubenswrapper[4753]: I0129 12:27:46.340097 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a925aaa3-40e0-4915-a43d-31c3294b4cbb-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "a925aaa3-40e0-4915-a43d-31c3294b4cbb" (UID: "a925aaa3-40e0-4915-a43d-31c3294b4cbb"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:27:46 crc kubenswrapper[4753]: I0129 12:27:46.342386 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a925aaa3-40e0-4915-a43d-31c3294b4cbb-scripts" (OuterVolumeSpecName: "scripts") pod "a925aaa3-40e0-4915-a43d-31c3294b4cbb" (UID: "a925aaa3-40e0-4915-a43d-31c3294b4cbb"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:27:46 crc kubenswrapper[4753]: I0129 12:27:46.343163 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a925aaa3-40e0-4915-a43d-31c3294b4cbb-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "a925aaa3-40e0-4915-a43d-31c3294b4cbb" (UID: "a925aaa3-40e0-4915-a43d-31c3294b4cbb"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:27:46 crc kubenswrapper[4753]: I0129 12:27:46.418008 4753 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a925aaa3-40e0-4915-a43d-31c3294b4cbb-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:27:46 crc kubenswrapper[4753]: I0129 12:27:46.418399 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c765s\" (UniqueName: \"kubernetes.io/projected/a925aaa3-40e0-4915-a43d-31c3294b4cbb-kube-api-access-c765s\") on node \"crc\" DevicePath \"\"" Jan 29 12:27:46 crc kubenswrapper[4753]: I0129 12:27:46.418493 4753 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/a925aaa3-40e0-4915-a43d-31c3294b4cbb-swiftconf\") on node \"crc\" DevicePath \"\"" Jan 29 12:27:46 crc kubenswrapper[4753]: I0129 12:27:46.418574 4753 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/a925aaa3-40e0-4915-a43d-31c3294b4cbb-ring-data-devices\") on node \"crc\" DevicePath \"\"" Jan 29 12:27:46 crc kubenswrapper[4753]: I0129 12:27:46.418656 4753 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/a925aaa3-40e0-4915-a43d-31c3294b4cbb-dispersionconf\") on node \"crc\" DevicePath \"\"" Jan 29 12:27:46 crc kubenswrapper[4753]: I0129 12:27:46.418728 4753 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/a925aaa3-40e0-4915-a43d-31c3294b4cbb-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 29 12:27:46 crc kubenswrapper[4753]: I0129 12:27:46.864643 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-ring-rebalance-slxbn" event={"ID":"a925aaa3-40e0-4915-a43d-31c3294b4cbb","Type":"ContainerDied","Data":"b20ffffd03121f19034afea0ab067bd3b0e3fe8e4f35cb9125e0fdf9f1e3ad2a"} Jan 29 12:27:46 crc kubenswrapper[4753]: I0129 12:27:46.864712 4753 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b20ffffd03121f19034afea0ab067bd3b0e3fe8e4f35cb9125e0fdf9f1e3ad2a" Jan 29 12:27:46 crc kubenswrapper[4753]: I0129 12:27:46.864743 4753 util.go:48] "No ready 
sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-slxbn" Jan 29 12:27:47 crc kubenswrapper[4753]: I0129 12:27:47.096052 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-ring-rebalance-slxbn_a925aaa3-40e0-4915-a43d-31c3294b4cbb/swift-ring-rebalance/0.log" Jan 29 12:27:47 crc kubenswrapper[4753]: I0129 12:27:47.944035 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/39385d87-5895-47b3-8fa4-dff5549bca97-etc-swift\") pod \"swift-storage-0\" (UID: \"39385d87-5895-47b3-8fa4-dff5549bca97\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:27:47 crc kubenswrapper[4753]: I0129 12:27:47.955212 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/39385d87-5895-47b3-8fa4-dff5549bca97-etc-swift\") pod \"swift-storage-0\" (UID: \"39385d87-5895-47b3-8fa4-dff5549bca97\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:27:48 crc kubenswrapper[4753]: I0129 12:27:48.141183 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:27:48 crc kubenswrapper[4753]: I0129 12:27:48.509424 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1279ea7f-350d-4657-8d79-d384e6974700-etc-swift\") pod \"swift-proxy-6bc96d68cf-vw4t2\" (UID: \"1279ea7f-350d-4657-8d79-d384e6974700\") " pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-vw4t2" Jan 29 12:27:48 crc kubenswrapper[4753]: I0129 12:27:48.522642 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1279ea7f-350d-4657-8d79-d384e6974700-etc-swift\") pod \"swift-proxy-6bc96d68cf-vw4t2\" (UID: \"1279ea7f-350d-4657-8d79-d384e6974700\") " pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-vw4t2" Jan 29 12:27:48 crc kubenswrapper[4753]: I0129 12:27:48.731586 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-vw4t2" Jan 29 12:27:48 crc kubenswrapper[4753]: I0129 12:27:48.736800 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-storage-0"] Jan 29 12:27:48 crc kubenswrapper[4753]: I0129 12:27:48.821736 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-ring-rebalance-slxbn_a925aaa3-40e0-4915-a43d-31c3294b4cbb/swift-ring-rebalance/0.log" Jan 29 12:27:49 crc kubenswrapper[4753]: I0129 12:27:49.022891 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"39385d87-5895-47b3-8fa4-dff5549bca97","Type":"ContainerStarted","Data":"213c58c1ff01cb6eb2c6a589e4bc52ebd541e63a0293cd2b41fa7f74ca31542d"} Jan 29 12:27:49 crc kubenswrapper[4753]: I0129 12:27:49.281418 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-proxy-6bc96d68cf-vw4t2"] Jan 29 12:27:49 crc kubenswrapper[4753]: W0129 12:27:49.282470 4753 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1279ea7f_350d_4657_8d79_d384e6974700.slice/crio-d0e294ae3117d30051d766d0204b85d7e203d7039512163ef3dd7365354ab3ac WatchSource:0}: Error finding container d0e294ae3117d30051d766d0204b85d7e203d7039512163ef3dd7365354ab3ac: Status 404 returned error can't find the container with id d0e294ae3117d30051d766d0204b85d7e203d7039512163ef3dd7365354ab3ac Jan 29 12:27:50 crc kubenswrapper[4753]: I0129 12:27:50.032533 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-vw4t2" event={"ID":"1279ea7f-350d-4657-8d79-d384e6974700","Type":"ContainerStarted","Data":"9aa82879a77a70118ab711b5e80d710b9819382b80bdf7b5852721678330b2b2"} Jan 29 12:27:50 crc kubenswrapper[4753]: I0129 12:27:50.032905 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-vw4t2" event={"ID":"1279ea7f-350d-4657-8d79-d384e6974700","Type":"ContainerStarted","Data":"f9e68885ed21f11c82942f493d4bec986cbcb9cfc313e2a9c0c92797057fe94f"} Jan 29 12:27:50 crc kubenswrapper[4753]: I0129 12:27:50.032926 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-vw4t2" event={"ID":"1279ea7f-350d-4657-8d79-d384e6974700","Type":"ContainerStarted","Data":"d0e294ae3117d30051d766d0204b85d7e203d7039512163ef3dd7365354ab3ac"} Jan 29 12:27:50 crc kubenswrapper[4753]: I0129 12:27:50.032993 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-vw4t2" Jan 29 12:27:50 crc kubenswrapper[4753]: I0129 12:27:50.033018 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-vw4t2" Jan 29 12:27:50 crc kubenswrapper[4753]: I0129 12:27:50.434867 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-ring-rebalance-slxbn_a925aaa3-40e0-4915-a43d-31c3294b4cbb/swift-ring-rebalance/0.log" Jan 29 12:27:51 crc kubenswrapper[4753]: I0129 12:27:51.043634 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"39385d87-5895-47b3-8fa4-dff5549bca97","Type":"ContainerStarted","Data":"984e7ec23fbb1ea4d65de4f32027ca929ec222f93bf54d3a02596d29a4717385"} Jan 29 12:27:51 crc kubenswrapper[4753]: I0129 12:27:51.043726 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" 
event={"ID":"39385d87-5895-47b3-8fa4-dff5549bca97","Type":"ContainerStarted","Data":"063062d207c79c3b7f996082ef0d8f966b55a5285627a39041e569d21584423f"} Jan 29 12:27:51 crc kubenswrapper[4753]: I0129 12:27:51.043743 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"39385d87-5895-47b3-8fa4-dff5549bca97","Type":"ContainerStarted","Data":"af0f9b6fe749f96595fa413d19eb4d05d225aa607b0bdca7f6249d9c3c82af97"} Jan 29 12:27:51 crc kubenswrapper[4753]: I0129 12:27:51.043760 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"39385d87-5895-47b3-8fa4-dff5549bca97","Type":"ContainerStarted","Data":"3726b25caf518cbf35e2f777e83e1ecfee7780f3edbad81b396ea283f5da3a4a"} Jan 29 12:27:51 crc kubenswrapper[4753]: I0129 12:27:51.985656 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-ring-rebalance-slxbn_a925aaa3-40e0-4915-a43d-31c3294b4cbb/swift-ring-rebalance/0.log" Jan 29 12:27:53 crc kubenswrapper[4753]: I0129 12:27:53.073021 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"39385d87-5895-47b3-8fa4-dff5549bca97","Type":"ContainerStarted","Data":"76c9500d039ba3a69ffb165677dd2e12bdadadeb261817ebd10496ce731615ce"} Jan 29 12:27:53 crc kubenswrapper[4753]: I0129 12:27:53.073442 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"39385d87-5895-47b3-8fa4-dff5549bca97","Type":"ContainerStarted","Data":"167d4dbddcf9e32fccd34fe86b69996a5f6c260ba7e291f34f64d24b772b8ac8"} Jan 29 12:27:53 crc kubenswrapper[4753]: I0129 12:27:53.521690 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-ring-rebalance-slxbn_a925aaa3-40e0-4915-a43d-31c3294b4cbb/swift-ring-rebalance/0.log" Jan 29 12:27:54 crc kubenswrapper[4753]: I0129 12:27:54.089758 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"39385d87-5895-47b3-8fa4-dff5549bca97","Type":"ContainerStarted","Data":"737616607dae86265208ff6f145edb0ce8b20dba96eda0daf445337b0fef1935"} Jan 29 12:27:54 crc kubenswrapper[4753]: I0129 12:27:54.090825 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"39385d87-5895-47b3-8fa4-dff5549bca97","Type":"ContainerStarted","Data":"75aca0fed29877eb2ade05283d8faf4a4c3f794d44c85614b217f1bf2ae5b2ce"} Jan 29 12:27:55 crc kubenswrapper[4753]: I0129 12:27:55.107069 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"39385d87-5895-47b3-8fa4-dff5549bca97","Type":"ContainerStarted","Data":"f600c9d18c7bef57d4cdc5e5e4f2ef9ad5b634df5b0c23fc5d34c56cbc0abc7c"} Jan 29 12:27:55 crc kubenswrapper[4753]: I0129 12:27:55.107161 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"39385d87-5895-47b3-8fa4-dff5549bca97","Type":"ContainerStarted","Data":"f512c2300a33e2cb5b259c3418f458b2d6f86634ccce39b9adee884ae693a404"} Jan 29 12:27:55 crc kubenswrapper[4753]: I0129 12:27:55.117025 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-ring-rebalance-slxbn_a925aaa3-40e0-4915-a43d-31c3294b4cbb/swift-ring-rebalance/0.log" Jan 29 12:27:56 crc kubenswrapper[4753]: I0129 12:27:56.121757 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" 
event={"ID":"39385d87-5895-47b3-8fa4-dff5549bca97","Type":"ContainerStarted","Data":"e7da666c1e8d2b0689ffa24259c62b01b913567e00d5b3ed3b19dd77e0c6c533"} Jan 29 12:27:56 crc kubenswrapper[4753]: I0129 12:27:56.122106 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"39385d87-5895-47b3-8fa4-dff5549bca97","Type":"ContainerStarted","Data":"955915dd25543e1efdb915a3879096fba3d38186fe0b1844ff742a04165dbcab"} Jan 29 12:27:56 crc kubenswrapper[4753]: I0129 12:27:56.122125 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"39385d87-5895-47b3-8fa4-dff5549bca97","Type":"ContainerStarted","Data":"60fba7ddfb790e3ab32a5fcb0f5930bccf040407ee1755fd70b5ef489d123112"} Jan 29 12:27:56 crc kubenswrapper[4753]: I0129 12:27:56.122139 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"39385d87-5895-47b3-8fa4-dff5549bca97","Type":"ContainerStarted","Data":"896609a3bbf20ce8ed0c2121726b125b6eb828e48157cc650e81099744c07e6a"} Jan 29 12:27:56 crc kubenswrapper[4753]: I0129 12:27:56.122152 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"39385d87-5895-47b3-8fa4-dff5549bca97","Type":"ContainerStarted","Data":"b5dbc9e432cab22467e73f936a4bd9ed835e89758f8d7b9248aa062527139167"} Jan 29 12:27:56 crc kubenswrapper[4753]: I0129 12:27:56.179356 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-vw4t2" podStartSLOduration=24.179290696 podStartE2EDuration="24.179290696s" podCreationTimestamp="2026-01-29 12:27:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:27:50.06263712 +0000 UTC m=+1284.314718575" watchObservedRunningTime="2026-01-29 12:27:56.179290696 +0000 UTC m=+1290.431372151" Jan 29 12:27:56 crc kubenswrapper[4753]: I0129 12:27:56.180066 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="swift-kuttl-tests/swift-storage-0" podStartSLOduration=20.48585586 podStartE2EDuration="26.180057127s" podCreationTimestamp="2026-01-29 12:27:30 +0000 UTC" firstStartedPulling="2026-01-29 12:27:48.759520911 +0000 UTC m=+1283.011602366" lastFinishedPulling="2026-01-29 12:27:54.453722178 +0000 UTC m=+1288.705803633" observedRunningTime="2026-01-29 12:27:56.172439715 +0000 UTC m=+1290.424521190" watchObservedRunningTime="2026-01-29 12:27:56.180057127 +0000 UTC m=+1290.432138582" Jan 29 12:27:56 crc kubenswrapper[4753]: I0129 12:27:56.872929 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-ring-rebalance-slxbn_a925aaa3-40e0-4915-a43d-31c3294b4cbb/swift-ring-rebalance/0.log" Jan 29 12:27:58 crc kubenswrapper[4753]: I0129 12:27:58.473200 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-ring-rebalance-slxbn_a925aaa3-40e0-4915-a43d-31c3294b4cbb/swift-ring-rebalance/0.log" Jan 29 12:27:58 crc kubenswrapper[4753]: I0129 12:27:58.734462 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-vw4t2" Jan 29 12:27:58 crc kubenswrapper[4753]: I0129 12:27:58.735808 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-vw4t2" Jan 29 12:28:00 crc kubenswrapper[4753]: I0129 12:28:00.127164 4753 log.go:25] "Finished parsing log file" 
path="/var/log/pods/swift-kuttl-tests_swift-ring-rebalance-slxbn_a925aaa3-40e0-4915-a43d-31c3294b4cbb/swift-ring-rebalance/0.log" Jan 29 12:28:01 crc kubenswrapper[4753]: I0129 12:28:01.676899 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-ring-rebalance-slxbn_a925aaa3-40e0-4915-a43d-31c3294b4cbb/swift-ring-rebalance/0.log" Jan 29 12:28:03 crc kubenswrapper[4753]: I0129 12:28:03.107756 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/swift-storage-2"] Jan 29 12:28:03 crc kubenswrapper[4753]: E0129 12:28:03.108398 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a925aaa3-40e0-4915-a43d-31c3294b4cbb" containerName="swift-ring-rebalance" Jan 29 12:28:03 crc kubenswrapper[4753]: I0129 12:28:03.108437 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="a925aaa3-40e0-4915-a43d-31c3294b4cbb" containerName="swift-ring-rebalance" Jan 29 12:28:03 crc kubenswrapper[4753]: I0129 12:28:03.108686 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="a925aaa3-40e0-4915-a43d-31c3294b4cbb" containerName="swift-ring-rebalance" Jan 29 12:28:03 crc kubenswrapper[4753]: I0129 12:28:03.114802 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-storage-2" Jan 29 12:28:03 crc kubenswrapper[4753]: I0129 12:28:03.120594 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/swift-storage-1"] Jan 29 12:28:03 crc kubenswrapper[4753]: I0129 12:28:03.128455 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-storage-2"] Jan 29 12:28:03 crc kubenswrapper[4753]: I0129 12:28:03.128655 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-storage-1" Jan 29 12:28:03 crc kubenswrapper[4753]: I0129 12:28:03.137217 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-storage-1"] Jan 29 12:28:03 crc kubenswrapper[4753]: I0129 12:28:03.292348 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc-etc-swift\") pod \"swift-storage-2\" (UID: \"32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc\") " pod="swift-kuttl-tests/swift-storage-2" Jan 29 12:28:03 crc kubenswrapper[4753]: I0129 12:28:03.324700 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc-lock\") pod \"swift-storage-2\" (UID: \"32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc\") " pod="swift-kuttl-tests/swift-storage-2" Jan 29 12:28:03 crc kubenswrapper[4753]: I0129 12:28:03.326764 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/ddeb279d-2025-4326-8e65-865d310f7c0a-cache\") pod \"swift-storage-1\" (UID: \"ddeb279d-2025-4326-8e65-865d310f7c0a\") " pod="swift-kuttl-tests/swift-storage-1" Jan 29 12:28:03 crc kubenswrapper[4753]: I0129 12:28:03.326833 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z446w\" (UniqueName: \"kubernetes.io/projected/32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc-kube-api-access-z446w\") pod \"swift-storage-2\" (UID: \"32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc\") " pod="swift-kuttl-tests/swift-storage-2" Jan 29 12:28:03 crc kubenswrapper[4753]: I0129 
12:28:03.326957 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"swift-storage-2\" (UID: \"32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc\") " pod="swift-kuttl-tests/swift-storage-2" Jan 29 12:28:03 crc kubenswrapper[4753]: I0129 12:28:03.326992 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/ddeb279d-2025-4326-8e65-865d310f7c0a-etc-swift\") pod \"swift-storage-1\" (UID: \"ddeb279d-2025-4326-8e65-865d310f7c0a\") " pod="swift-kuttl-tests/swift-storage-1" Jan 29 12:28:03 crc kubenswrapper[4753]: I0129 12:28:03.327035 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc-cache\") pod \"swift-storage-2\" (UID: \"32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc\") " pod="swift-kuttl-tests/swift-storage-2" Jan 29 12:28:03 crc kubenswrapper[4753]: I0129 12:28:03.327138 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/ddeb279d-2025-4326-8e65-865d310f7c0a-lock\") pod \"swift-storage-1\" (UID: \"ddeb279d-2025-4326-8e65-865d310f7c0a\") " pod="swift-kuttl-tests/swift-storage-1" Jan 29 12:28:03 crc kubenswrapper[4753]: I0129 12:28:03.327260 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"swift-storage-1\" (UID: \"ddeb279d-2025-4326-8e65-865d310f7c0a\") " pod="swift-kuttl-tests/swift-storage-1" Jan 29 12:28:03 crc kubenswrapper[4753]: I0129 12:28:03.327317 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qqqqf\" (UniqueName: \"kubernetes.io/projected/ddeb279d-2025-4326-8e65-865d310f7c0a-kube-api-access-qqqqf\") pod \"swift-storage-1\" (UID: \"ddeb279d-2025-4326-8e65-865d310f7c0a\") " pod="swift-kuttl-tests/swift-storage-1" Jan 29 12:28:03 crc kubenswrapper[4753]: I0129 12:28:03.428267 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/ddeb279d-2025-4326-8e65-865d310f7c0a-lock\") pod \"swift-storage-1\" (UID: \"ddeb279d-2025-4326-8e65-865d310f7c0a\") " pod="swift-kuttl-tests/swift-storage-1" Jan 29 12:28:03 crc kubenswrapper[4753]: I0129 12:28:03.428646 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"swift-storage-1\" (UID: \"ddeb279d-2025-4326-8e65-865d310f7c0a\") " pod="swift-kuttl-tests/swift-storage-1" Jan 29 12:28:03 crc kubenswrapper[4753]: I0129 12:28:03.428690 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qqqqf\" (UniqueName: \"kubernetes.io/projected/ddeb279d-2025-4326-8e65-865d310f7c0a-kube-api-access-qqqqf\") pod \"swift-storage-1\" (UID: \"ddeb279d-2025-4326-8e65-865d310f7c0a\") " pod="swift-kuttl-tests/swift-storage-1" Jan 29 12:28:03 crc kubenswrapper[4753]: I0129 12:28:03.428726 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc-etc-swift\") pod 
\"swift-storage-2\" (UID: \"32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc\") " pod="swift-kuttl-tests/swift-storage-2" Jan 29 12:28:03 crc kubenswrapper[4753]: I0129 12:28:03.428762 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc-lock\") pod \"swift-storage-2\" (UID: \"32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc\") " pod="swift-kuttl-tests/swift-storage-2" Jan 29 12:28:03 crc kubenswrapper[4753]: I0129 12:28:03.428812 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/ddeb279d-2025-4326-8e65-865d310f7c0a-cache\") pod \"swift-storage-1\" (UID: \"ddeb279d-2025-4326-8e65-865d310f7c0a\") " pod="swift-kuttl-tests/swift-storage-1" Jan 29 12:28:03 crc kubenswrapper[4753]: I0129 12:28:03.428943 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z446w\" (UniqueName: \"kubernetes.io/projected/32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc-kube-api-access-z446w\") pod \"swift-storage-2\" (UID: \"32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc\") " pod="swift-kuttl-tests/swift-storage-2" Jan 29 12:28:03 crc kubenswrapper[4753]: I0129 12:28:03.429021 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"swift-storage-2\" (UID: \"32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc\") " pod="swift-kuttl-tests/swift-storage-2" Jan 29 12:28:03 crc kubenswrapper[4753]: I0129 12:28:03.429049 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/ddeb279d-2025-4326-8e65-865d310f7c0a-etc-swift\") pod \"swift-storage-1\" (UID: \"ddeb279d-2025-4326-8e65-865d310f7c0a\") " pod="swift-kuttl-tests/swift-storage-1" Jan 29 12:28:03 crc kubenswrapper[4753]: I0129 12:28:03.429091 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc-cache\") pod \"swift-storage-2\" (UID: \"32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc\") " pod="swift-kuttl-tests/swift-storage-2" Jan 29 12:28:03 crc kubenswrapper[4753]: I0129 12:28:03.429214 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc-lock\") pod \"swift-storage-2\" (UID: \"32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc\") " pod="swift-kuttl-tests/swift-storage-2" Jan 29 12:28:03 crc kubenswrapper[4753]: I0129 12:28:03.429211 4753 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"swift-storage-1\" (UID: \"ddeb279d-2025-4326-8e65-865d310f7c0a\") device mount path \"/mnt/openstack/pv06\"" pod="swift-kuttl-tests/swift-storage-1" Jan 29 12:28:03 crc kubenswrapper[4753]: I0129 12:28:03.429333 4753 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"swift-storage-2\" (UID: \"32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc\") device mount path \"/mnt/openstack/pv04\"" pod="swift-kuttl-tests/swift-storage-2" Jan 29 12:28:03 crc kubenswrapper[4753]: I0129 12:28:03.429460 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: 
\"kubernetes.io/empty-dir/32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc-cache\") pod \"swift-storage-2\" (UID: \"32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc\") " pod="swift-kuttl-tests/swift-storage-2" Jan 29 12:28:03 crc kubenswrapper[4753]: I0129 12:28:03.429547 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/ddeb279d-2025-4326-8e65-865d310f7c0a-lock\") pod \"swift-storage-1\" (UID: \"ddeb279d-2025-4326-8e65-865d310f7c0a\") " pod="swift-kuttl-tests/swift-storage-1" Jan 29 12:28:03 crc kubenswrapper[4753]: I0129 12:28:03.429749 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/ddeb279d-2025-4326-8e65-865d310f7c0a-cache\") pod \"swift-storage-1\" (UID: \"ddeb279d-2025-4326-8e65-865d310f7c0a\") " pod="swift-kuttl-tests/swift-storage-1" Jan 29 12:28:03 crc kubenswrapper[4753]: I0129 12:28:03.437626 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc-etc-swift\") pod \"swift-storage-2\" (UID: \"32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc\") " pod="swift-kuttl-tests/swift-storage-2" Jan 29 12:28:03 crc kubenswrapper[4753]: I0129 12:28:03.438315 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/ddeb279d-2025-4326-8e65-865d310f7c0a-etc-swift\") pod \"swift-storage-1\" (UID: \"ddeb279d-2025-4326-8e65-865d310f7c0a\") " pod="swift-kuttl-tests/swift-storage-1" Jan 29 12:28:03 crc kubenswrapper[4753]: I0129 12:28:03.456745 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z446w\" (UniqueName: \"kubernetes.io/projected/32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc-kube-api-access-z446w\") pod \"swift-storage-2\" (UID: \"32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc\") " pod="swift-kuttl-tests/swift-storage-2" Jan 29 12:28:03 crc kubenswrapper[4753]: I0129 12:28:03.458124 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qqqqf\" (UniqueName: \"kubernetes.io/projected/ddeb279d-2025-4326-8e65-865d310f7c0a-kube-api-access-qqqqf\") pod \"swift-storage-1\" (UID: \"ddeb279d-2025-4326-8e65-865d310f7c0a\") " pod="swift-kuttl-tests/swift-storage-1" Jan 29 12:28:03 crc kubenswrapper[4753]: I0129 12:28:03.463940 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"swift-storage-2\" (UID: \"32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc\") " pod="swift-kuttl-tests/swift-storage-2" Jan 29 12:28:03 crc kubenswrapper[4753]: I0129 12:28:03.465704 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"swift-storage-1\" (UID: \"ddeb279d-2025-4326-8e65-865d310f7c0a\") " pod="swift-kuttl-tests/swift-storage-1" Jan 29 12:28:03 crc kubenswrapper[4753]: I0129 12:28:03.740011 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-storage-2" Jan 29 12:28:03 crc kubenswrapper[4753]: I0129 12:28:03.751132 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/swift-storage-1" Jan 29 12:28:04 crc kubenswrapper[4753]: W0129 12:28:04.289771 4753 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod32ea34f5_5f12_4c38_8b0b_6ca91dfbb2dc.slice/crio-44898ac6a06f8bfc1c454ff65ed0567198704e01dd2cacf07c4f71d291cf462e WatchSource:0}: Error finding container 44898ac6a06f8bfc1c454ff65ed0567198704e01dd2cacf07c4f71d291cf462e: Status 404 returned error can't find the container with id 44898ac6a06f8bfc1c454ff65ed0567198704e01dd2cacf07c4f71d291cf462e Jan 29 12:28:04 crc kubenswrapper[4753]: I0129 12:28:04.292846 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-storage-2"] Jan 29 12:28:04 crc kubenswrapper[4753]: I0129 12:28:04.383477 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-storage-1"] Jan 29 12:28:04 crc kubenswrapper[4753]: I0129 12:28:04.502012 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc","Type":"ContainerStarted","Data":"fe8f9ebcc1c86411e3bb093c67fb21d93a96fee55951ceee7e3131efeb068618"} Jan 29 12:28:04 crc kubenswrapper[4753]: I0129 12:28:04.502066 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc","Type":"ContainerStarted","Data":"44898ac6a06f8bfc1c454ff65ed0567198704e01dd2cacf07c4f71d291cf462e"} Jan 29 12:28:04 crc kubenswrapper[4753]: I0129 12:28:04.503889 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"ddeb279d-2025-4326-8e65-865d310f7c0a","Type":"ContainerStarted","Data":"fa7b05f6c58595f497027116e691b925777e495abff02bf0eb48128d2f59d397"} Jan 29 12:28:05 crc kubenswrapper[4753]: I0129 12:28:05.526544 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc","Type":"ContainerStarted","Data":"c61d771f239c1ddb78ae193005b7f28411347094488416d74c36b5a4e1af184e"} Jan 29 12:28:05 crc kubenswrapper[4753]: I0129 12:28:05.526911 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc","Type":"ContainerStarted","Data":"7205dfe02ab43d1f70f31ef2ec36df92f3b6ddfe4b58c079fa1bef08461d8f7d"} Jan 29 12:28:05 crc kubenswrapper[4753]: I0129 12:28:05.526924 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc","Type":"ContainerStarted","Data":"00c1eb87259c9f9ca790410d154830681a972b41be2f543d941b16c768d02c7a"} Jan 29 12:28:05 crc kubenswrapper[4753]: I0129 12:28:05.526934 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc","Type":"ContainerStarted","Data":"4be3d87956e4951b9b943798ec79dccb05d39a88a8d1c684b6af2f4d1d49dd8a"} Jan 29 12:28:05 crc kubenswrapper[4753]: I0129 12:28:05.526944 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc","Type":"ContainerStarted","Data":"0fdf7c43e207058700148694e8d0512172a5c08367d82685ee34cb21a7d777bb"} Jan 29 12:28:05 crc kubenswrapper[4753]: I0129 12:28:05.526955 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="swift-kuttl-tests/swift-storage-2" event={"ID":"32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc","Type":"ContainerStarted","Data":"ffb8afe70efce4899f28e6a84a2da82ec2152af28d185ef2246910c3630e20af"} Jan 29 12:28:05 crc kubenswrapper[4753]: I0129 12:28:05.530168 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"ddeb279d-2025-4326-8e65-865d310f7c0a","Type":"ContainerStarted","Data":"c2c67d359cc4c35fc1b93d25f31dd1acbe50d819456e7ba45edbca5cc66b76ad"} Jan 29 12:28:05 crc kubenswrapper[4753]: I0129 12:28:05.530199 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"ddeb279d-2025-4326-8e65-865d310f7c0a","Type":"ContainerStarted","Data":"ab8ab1b0296c8d63c94fe76b9be4dff85e2c3d3c5e119f4c61a4e952a3eacb57"} Jan 29 12:28:05 crc kubenswrapper[4753]: I0129 12:28:05.530212 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"ddeb279d-2025-4326-8e65-865d310f7c0a","Type":"ContainerStarted","Data":"c02e393358bd7fc72cbcc938357a001d8e45a3614c97f8c21ef2b425a11ea56c"} Jan 29 12:28:05 crc kubenswrapper[4753]: I0129 12:28:05.530236 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"ddeb279d-2025-4326-8e65-865d310f7c0a","Type":"ContainerStarted","Data":"8066888e8c4974033e42ad712726fb89b198041760f9941ccf400414fe67f7c4"} Jan 29 12:28:05 crc kubenswrapper[4753]: I0129 12:28:05.530246 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"ddeb279d-2025-4326-8e65-865d310f7c0a","Type":"ContainerStarted","Data":"62808980bc02e825395831219271d7565dbce4f504194b0b1d24f3ec68d29c7a"} Jan 29 12:28:05 crc kubenswrapper[4753]: I0129 12:28:05.530259 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"ddeb279d-2025-4326-8e65-865d310f7c0a","Type":"ContainerStarted","Data":"e2f1954c5859377fa2807abc43754082b700caff19ed4069e8ed7c2b84149b60"} Jan 29 12:28:06 crc kubenswrapper[4753]: I0129 12:28:06.780008 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc","Type":"ContainerStarted","Data":"ff839774eea656b737782f6cc51d5decbaa50e4fcaeed6ad6d8c0f31282f312e"} Jan 29 12:28:06 crc kubenswrapper[4753]: I0129 12:28:06.780387 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc","Type":"ContainerStarted","Data":"400a061c4bd0a357658de03c8e6bf769bb555b258f4dc874056c3bc75b8d5f62"} Jan 29 12:28:06 crc kubenswrapper[4753]: I0129 12:28:06.780399 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc","Type":"ContainerStarted","Data":"8c90ac4f21fbe6fd19be163628758ffd03ac6be361d6e346e128041b97d6eb05"} Jan 29 12:28:06 crc kubenswrapper[4753]: I0129 12:28:06.780408 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc","Type":"ContainerStarted","Data":"b601e419b6959550e2118567697a445d92d4c7c854603a5c05ad0a8bd8840b30"} Jan 29 12:28:06 crc kubenswrapper[4753]: I0129 12:28:06.780433 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" 
event={"ID":"32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc","Type":"ContainerStarted","Data":"ff3a6b0d2ffa6545d5a3788e7c7dea23a83af8af68f1a3dbb4abcd33d05b9901"} Jan 29 12:28:06 crc kubenswrapper[4753]: I0129 12:28:06.823809 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"ddeb279d-2025-4326-8e65-865d310f7c0a","Type":"ContainerStarted","Data":"74e579bb81dd2d0a90b87f4e8962a16fb2e6352c7384af62591ce4ac200cc268"} Jan 29 12:28:06 crc kubenswrapper[4753]: I0129 12:28:06.823869 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"ddeb279d-2025-4326-8e65-865d310f7c0a","Type":"ContainerStarted","Data":"7a17416ceba41ee4beed6c5cd6e769268f0fb17d13e4cf402b2974089c52b3c5"} Jan 29 12:28:06 crc kubenswrapper[4753]: I0129 12:28:06.823885 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"ddeb279d-2025-4326-8e65-865d310f7c0a","Type":"ContainerStarted","Data":"78e57805ad2975127bfcf29509b23f4d1294dcfd0848150a2111e6b61c4ddfba"} Jan 29 12:28:06 crc kubenswrapper[4753]: I0129 12:28:06.823898 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"ddeb279d-2025-4326-8e65-865d310f7c0a","Type":"ContainerStarted","Data":"e35b583e269bd87ef2536a7423d7e23d108405a6dac8032ce67f1432845ac9ee"} Jan 29 12:28:06 crc kubenswrapper[4753]: I0129 12:28:06.823911 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"ddeb279d-2025-4326-8e65-865d310f7c0a","Type":"ContainerStarted","Data":"39cbef45410dc33c3ccd280dd8dbd93028f01d71d87119a2009d38d2fab5647f"} Jan 29 12:28:07 crc kubenswrapper[4753]: I0129 12:28:07.059294 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-slxbn"] Jan 29 12:28:07 crc kubenswrapper[4753]: I0129 12:28:07.070324 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-slxbn"] Jan 29 12:28:07 crc kubenswrapper[4753]: I0129 12:28:07.096365 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-rl5q8"] Jan 29 12:28:07 crc kubenswrapper[4753]: I0129 12:28:07.097711 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-rl5q8" Jan 29 12:28:07 crc kubenswrapper[4753]: I0129 12:28:07.101356 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"swift-kuttl-tests"/"swift-ring-config-data" Jan 29 12:28:07 crc kubenswrapper[4753]: I0129 12:28:07.101674 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"swift-kuttl-tests"/"swift-ring-scripts" Jan 29 12:28:07 crc kubenswrapper[4753]: I0129 12:28:07.108419 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-rl5q8"] Jan 29 12:28:07 crc kubenswrapper[4753]: I0129 12:28:07.122369 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/74a6966a-a456-4920-89a6-a231ed9bb1a7-ring-data-devices\") pod \"swift-ring-rebalance-rl5q8\" (UID: \"74a6966a-a456-4920-89a6-a231ed9bb1a7\") " pod="swift-kuttl-tests/swift-ring-rebalance-rl5q8" Jan 29 12:28:07 crc kubenswrapper[4753]: I0129 12:28:07.122445 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/74a6966a-a456-4920-89a6-a231ed9bb1a7-dispersionconf\") pod \"swift-ring-rebalance-rl5q8\" (UID: \"74a6966a-a456-4920-89a6-a231ed9bb1a7\") " pod="swift-kuttl-tests/swift-ring-rebalance-rl5q8" Jan 29 12:28:07 crc kubenswrapper[4753]: I0129 12:28:07.122472 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/74a6966a-a456-4920-89a6-a231ed9bb1a7-etc-swift\") pod \"swift-ring-rebalance-rl5q8\" (UID: \"74a6966a-a456-4920-89a6-a231ed9bb1a7\") " pod="swift-kuttl-tests/swift-ring-rebalance-rl5q8" Jan 29 12:28:07 crc kubenswrapper[4753]: I0129 12:28:07.122503 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rxjvd\" (UniqueName: \"kubernetes.io/projected/74a6966a-a456-4920-89a6-a231ed9bb1a7-kube-api-access-rxjvd\") pod \"swift-ring-rebalance-rl5q8\" (UID: \"74a6966a-a456-4920-89a6-a231ed9bb1a7\") " pod="swift-kuttl-tests/swift-ring-rebalance-rl5q8" Jan 29 12:28:07 crc kubenswrapper[4753]: I0129 12:28:07.122529 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/74a6966a-a456-4920-89a6-a231ed9bb1a7-scripts\") pod \"swift-ring-rebalance-rl5q8\" (UID: \"74a6966a-a456-4920-89a6-a231ed9bb1a7\") " pod="swift-kuttl-tests/swift-ring-rebalance-rl5q8" Jan 29 12:28:07 crc kubenswrapper[4753]: I0129 12:28:07.122593 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/74a6966a-a456-4920-89a6-a231ed9bb1a7-swiftconf\") pod \"swift-ring-rebalance-rl5q8\" (UID: \"74a6966a-a456-4920-89a6-a231ed9bb1a7\") " pod="swift-kuttl-tests/swift-ring-rebalance-rl5q8" Jan 29 12:28:07 crc kubenswrapper[4753]: I0129 12:28:07.274943 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/74a6966a-a456-4920-89a6-a231ed9bb1a7-dispersionconf\") pod \"swift-ring-rebalance-rl5q8\" (UID: \"74a6966a-a456-4920-89a6-a231ed9bb1a7\") " pod="swift-kuttl-tests/swift-ring-rebalance-rl5q8" Jan 29 12:28:07 crc kubenswrapper[4753]: I0129 12:28:07.274990 4753 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/74a6966a-a456-4920-89a6-a231ed9bb1a7-etc-swift\") pod \"swift-ring-rebalance-rl5q8\" (UID: \"74a6966a-a456-4920-89a6-a231ed9bb1a7\") " pod="swift-kuttl-tests/swift-ring-rebalance-rl5q8" Jan 29 12:28:07 crc kubenswrapper[4753]: I0129 12:28:07.275024 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rxjvd\" (UniqueName: \"kubernetes.io/projected/74a6966a-a456-4920-89a6-a231ed9bb1a7-kube-api-access-rxjvd\") pod \"swift-ring-rebalance-rl5q8\" (UID: \"74a6966a-a456-4920-89a6-a231ed9bb1a7\") " pod="swift-kuttl-tests/swift-ring-rebalance-rl5q8" Jan 29 12:28:07 crc kubenswrapper[4753]: I0129 12:28:07.275050 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/74a6966a-a456-4920-89a6-a231ed9bb1a7-scripts\") pod \"swift-ring-rebalance-rl5q8\" (UID: \"74a6966a-a456-4920-89a6-a231ed9bb1a7\") " pod="swift-kuttl-tests/swift-ring-rebalance-rl5q8" Jan 29 12:28:07 crc kubenswrapper[4753]: I0129 12:28:07.275094 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/74a6966a-a456-4920-89a6-a231ed9bb1a7-swiftconf\") pod \"swift-ring-rebalance-rl5q8\" (UID: \"74a6966a-a456-4920-89a6-a231ed9bb1a7\") " pod="swift-kuttl-tests/swift-ring-rebalance-rl5q8" Jan 29 12:28:07 crc kubenswrapper[4753]: I0129 12:28:07.275129 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/74a6966a-a456-4920-89a6-a231ed9bb1a7-ring-data-devices\") pod \"swift-ring-rebalance-rl5q8\" (UID: \"74a6966a-a456-4920-89a6-a231ed9bb1a7\") " pod="swift-kuttl-tests/swift-ring-rebalance-rl5q8" Jan 29 12:28:07 crc kubenswrapper[4753]: I0129 12:28:07.275864 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/74a6966a-a456-4920-89a6-a231ed9bb1a7-ring-data-devices\") pod \"swift-ring-rebalance-rl5q8\" (UID: \"74a6966a-a456-4920-89a6-a231ed9bb1a7\") " pod="swift-kuttl-tests/swift-ring-rebalance-rl5q8" Jan 29 12:28:07 crc kubenswrapper[4753]: I0129 12:28:07.281702 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/74a6966a-a456-4920-89a6-a231ed9bb1a7-etc-swift\") pod \"swift-ring-rebalance-rl5q8\" (UID: \"74a6966a-a456-4920-89a6-a231ed9bb1a7\") " pod="swift-kuttl-tests/swift-ring-rebalance-rl5q8" Jan 29 12:28:07 crc kubenswrapper[4753]: I0129 12:28:07.281888 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/74a6966a-a456-4920-89a6-a231ed9bb1a7-scripts\") pod \"swift-ring-rebalance-rl5q8\" (UID: \"74a6966a-a456-4920-89a6-a231ed9bb1a7\") " pod="swift-kuttl-tests/swift-ring-rebalance-rl5q8" Jan 29 12:28:07 crc kubenswrapper[4753]: I0129 12:28:07.291252 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/74a6966a-a456-4920-89a6-a231ed9bb1a7-swiftconf\") pod \"swift-ring-rebalance-rl5q8\" (UID: \"74a6966a-a456-4920-89a6-a231ed9bb1a7\") " pod="swift-kuttl-tests/swift-ring-rebalance-rl5q8" Jan 29 12:28:07 crc kubenswrapper[4753]: I0129 12:28:07.292850 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: 
\"kubernetes.io/secret/74a6966a-a456-4920-89a6-a231ed9bb1a7-dispersionconf\") pod \"swift-ring-rebalance-rl5q8\" (UID: \"74a6966a-a456-4920-89a6-a231ed9bb1a7\") " pod="swift-kuttl-tests/swift-ring-rebalance-rl5q8" Jan 29 12:28:07 crc kubenswrapper[4753]: I0129 12:28:07.297815 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rxjvd\" (UniqueName: \"kubernetes.io/projected/74a6966a-a456-4920-89a6-a231ed9bb1a7-kube-api-access-rxjvd\") pod \"swift-ring-rebalance-rl5q8\" (UID: \"74a6966a-a456-4920-89a6-a231ed9bb1a7\") " pod="swift-kuttl-tests/swift-ring-rebalance-rl5q8" Jan 29 12:28:07 crc kubenswrapper[4753]: I0129 12:28:07.433513 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-rl5q8" Jan 29 12:28:07 crc kubenswrapper[4753]: I0129 12:28:07.679492 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-rl5q8"] Jan 29 12:28:07 crc kubenswrapper[4753]: W0129 12:28:07.696920 4753 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod74a6966a_a456_4920_89a6_a231ed9bb1a7.slice/crio-c940dbe48d2af07ecf94678273be75f84ee8c5fe2c8ce6e53830bbf33bcf0d06 WatchSource:0}: Error finding container c940dbe48d2af07ecf94678273be75f84ee8c5fe2c8ce6e53830bbf33bcf0d06: Status 404 returned error can't find the container with id c940dbe48d2af07ecf94678273be75f84ee8c5fe2c8ce6e53830bbf33bcf0d06 Jan 29 12:28:07 crc kubenswrapper[4753]: I0129 12:28:07.995176 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a925aaa3-40e0-4915-a43d-31c3294b4cbb" path="/var/lib/kubelet/pods/a925aaa3-40e0-4915-a43d-31c3294b4cbb/volumes" Jan 29 12:28:08 crc kubenswrapper[4753]: I0129 12:28:08.005930 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc","Type":"ContainerStarted","Data":"263c42f493bd95e422bf98dec8882fa3d3524e56d9ce4bc9f9ecf43ceb8c3a2e"} Jan 29 12:28:08 crc kubenswrapper[4753]: I0129 12:28:08.005991 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc","Type":"ContainerStarted","Data":"e886a1fa21080f1c391ce8ba831cdb18c9181775550b92bbd69711a723a6c2ca"} Jan 29 12:28:08 crc kubenswrapper[4753]: I0129 12:28:08.007264 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-ring-rebalance-rl5q8" event={"ID":"74a6966a-a456-4920-89a6-a231ed9bb1a7","Type":"ContainerStarted","Data":"c940dbe48d2af07ecf94678273be75f84ee8c5fe2c8ce6e53830bbf33bcf0d06"} Jan 29 12:28:08 crc kubenswrapper[4753]: I0129 12:28:08.027907 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"ddeb279d-2025-4326-8e65-865d310f7c0a","Type":"ContainerStarted","Data":"a60f6b51faf65756a3c0471962b2bdf525b7e3029c0357dd153d2da4daa57b8a"} Jan 29 12:28:08 crc kubenswrapper[4753]: I0129 12:28:08.027960 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"ddeb279d-2025-4326-8e65-865d310f7c0a","Type":"ContainerStarted","Data":"13d565afdd53734d519d75262cc241dad905071cb7825483fd615878fa346029"} Jan 29 12:28:09 crc kubenswrapper[4753]: I0129 12:28:09.047302 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" 
event={"ID":"32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc","Type":"ContainerStarted","Data":"089bfd5f212edcb63517b6be2c9e93e85788d0a36322b33d159dc4276eeffa8c"} Jan 29 12:28:09 crc kubenswrapper[4753]: I0129 12:28:09.060989 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-ring-rebalance-rl5q8" event={"ID":"74a6966a-a456-4920-89a6-a231ed9bb1a7","Type":"ContainerStarted","Data":"f7f5aab11c2304c788f61add30450086c7482019316e81864a6f9044c0d8019e"} Jan 29 12:28:09 crc kubenswrapper[4753]: I0129 12:28:09.070833 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"ddeb279d-2025-4326-8e65-865d310f7c0a","Type":"ContainerStarted","Data":"40e6b5dc9cea06540198cf963e5c70e91ace9da403911908c871f752fe076bc2"} Jan 29 12:28:09 crc kubenswrapper[4753]: I0129 12:28:09.096831 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="swift-kuttl-tests/swift-storage-2" podStartSLOduration=7.096810917 podStartE2EDuration="7.096810917s" podCreationTimestamp="2026-01-29 12:28:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:28:09.090140269 +0000 UTC m=+1303.342221724" watchObservedRunningTime="2026-01-29 12:28:09.096810917 +0000 UTC m=+1303.348892372" Jan 29 12:28:09 crc kubenswrapper[4753]: I0129 12:28:09.112275 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="swift-kuttl-tests/swift-ring-rebalance-rl5q8" podStartSLOduration=2.112247703 podStartE2EDuration="2.112247703s" podCreationTimestamp="2026-01-29 12:28:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:28:09.106695276 +0000 UTC m=+1303.358776751" watchObservedRunningTime="2026-01-29 12:28:09.112247703 +0000 UTC m=+1303.364329158" Jan 29 12:28:10 crc kubenswrapper[4753]: I0129 12:28:10.094601 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"ddeb279d-2025-4326-8e65-865d310f7c0a","Type":"ContainerStarted","Data":"8e356a88be616efe8f74f65c56281ee6e48af8b76b2a36f9cf9de679f1c7b6b8"} Jan 29 12:28:10 crc kubenswrapper[4753]: I0129 12:28:10.141877 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="swift-kuttl-tests/swift-storage-1" podStartSLOduration=8.141849317 podStartE2EDuration="8.141849317s" podCreationTimestamp="2026-01-29 12:28:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:28:10.135461437 +0000 UTC m=+1304.387542912" watchObservedRunningTime="2026-01-29 12:28:10.141849317 +0000 UTC m=+1304.393930772" Jan 29 12:28:18 crc kubenswrapper[4753]: I0129 12:28:18.384211 4753 generic.go:334] "Generic (PLEG): container finished" podID="74a6966a-a456-4920-89a6-a231ed9bb1a7" containerID="f7f5aab11c2304c788f61add30450086c7482019316e81864a6f9044c0d8019e" exitCode=0 Jan 29 12:28:18 crc kubenswrapper[4753]: I0129 12:28:18.384766 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-ring-rebalance-rl5q8" event={"ID":"74a6966a-a456-4920-89a6-a231ed9bb1a7","Type":"ContainerDied","Data":"f7f5aab11c2304c788f61add30450086c7482019316e81864a6f9044c0d8019e"} Jan 29 12:28:19 crc kubenswrapper[4753]: I0129 12:28:19.709083 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-rl5q8" Jan 29 12:28:19 crc kubenswrapper[4753]: I0129 12:28:19.727422 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/74a6966a-a456-4920-89a6-a231ed9bb1a7-dispersionconf\") pod \"74a6966a-a456-4920-89a6-a231ed9bb1a7\" (UID: \"74a6966a-a456-4920-89a6-a231ed9bb1a7\") " Jan 29 12:28:19 crc kubenswrapper[4753]: I0129 12:28:19.727768 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/74a6966a-a456-4920-89a6-a231ed9bb1a7-scripts\") pod \"74a6966a-a456-4920-89a6-a231ed9bb1a7\" (UID: \"74a6966a-a456-4920-89a6-a231ed9bb1a7\") " Jan 29 12:28:19 crc kubenswrapper[4753]: I0129 12:28:19.727857 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rxjvd\" (UniqueName: \"kubernetes.io/projected/74a6966a-a456-4920-89a6-a231ed9bb1a7-kube-api-access-rxjvd\") pod \"74a6966a-a456-4920-89a6-a231ed9bb1a7\" (UID: \"74a6966a-a456-4920-89a6-a231ed9bb1a7\") " Jan 29 12:28:19 crc kubenswrapper[4753]: I0129 12:28:19.727979 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/74a6966a-a456-4920-89a6-a231ed9bb1a7-swiftconf\") pod \"74a6966a-a456-4920-89a6-a231ed9bb1a7\" (UID: \"74a6966a-a456-4920-89a6-a231ed9bb1a7\") " Jan 29 12:28:19 crc kubenswrapper[4753]: I0129 12:28:19.728207 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/74a6966a-a456-4920-89a6-a231ed9bb1a7-ring-data-devices\") pod \"74a6966a-a456-4920-89a6-a231ed9bb1a7\" (UID: \"74a6966a-a456-4920-89a6-a231ed9bb1a7\") " Jan 29 12:28:19 crc kubenswrapper[4753]: I0129 12:28:19.729876 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/74a6966a-a456-4920-89a6-a231ed9bb1a7-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "74a6966a-a456-4920-89a6-a231ed9bb1a7" (UID: "74a6966a-a456-4920-89a6-a231ed9bb1a7"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:28:19 crc kubenswrapper[4753]: I0129 12:28:19.747647 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/74a6966a-a456-4920-89a6-a231ed9bb1a7-kube-api-access-rxjvd" (OuterVolumeSpecName: "kube-api-access-rxjvd") pod "74a6966a-a456-4920-89a6-a231ed9bb1a7" (UID: "74a6966a-a456-4920-89a6-a231ed9bb1a7"). InnerVolumeSpecName "kube-api-access-rxjvd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:28:19 crc kubenswrapper[4753]: I0129 12:28:19.755175 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/74a6966a-a456-4920-89a6-a231ed9bb1a7-scripts" (OuterVolumeSpecName: "scripts") pod "74a6966a-a456-4920-89a6-a231ed9bb1a7" (UID: "74a6966a-a456-4920-89a6-a231ed9bb1a7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:28:19 crc kubenswrapper[4753]: I0129 12:28:19.756335 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/74a6966a-a456-4920-89a6-a231ed9bb1a7-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "74a6966a-a456-4920-89a6-a231ed9bb1a7" (UID: "74a6966a-a456-4920-89a6-a231ed9bb1a7"). InnerVolumeSpecName "swiftconf". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:28:19 crc kubenswrapper[4753]: I0129 12:28:19.760888 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/74a6966a-a456-4920-89a6-a231ed9bb1a7-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "74a6966a-a456-4920-89a6-a231ed9bb1a7" (UID: "74a6966a-a456-4920-89a6-a231ed9bb1a7"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:28:19 crc kubenswrapper[4753]: I0129 12:28:19.830001 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/74a6966a-a456-4920-89a6-a231ed9bb1a7-etc-swift\") pod \"74a6966a-a456-4920-89a6-a231ed9bb1a7\" (UID: \"74a6966a-a456-4920-89a6-a231ed9bb1a7\") " Jan 29 12:28:19 crc kubenswrapper[4753]: I0129 12:28:19.830711 4753 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/74a6966a-a456-4920-89a6-a231ed9bb1a7-ring-data-devices\") on node \"crc\" DevicePath \"\"" Jan 29 12:28:19 crc kubenswrapper[4753]: I0129 12:28:19.830748 4753 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/74a6966a-a456-4920-89a6-a231ed9bb1a7-dispersionconf\") on node \"crc\" DevicePath \"\"" Jan 29 12:28:19 crc kubenswrapper[4753]: I0129 12:28:19.830769 4753 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/74a6966a-a456-4920-89a6-a231ed9bb1a7-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:28:19 crc kubenswrapper[4753]: I0129 12:28:19.830788 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rxjvd\" (UniqueName: \"kubernetes.io/projected/74a6966a-a456-4920-89a6-a231ed9bb1a7-kube-api-access-rxjvd\") on node \"crc\" DevicePath \"\"" Jan 29 12:28:19 crc kubenswrapper[4753]: I0129 12:28:19.830807 4753 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/74a6966a-a456-4920-89a6-a231ed9bb1a7-swiftconf\") on node \"crc\" DevicePath \"\"" Jan 29 12:28:19 crc kubenswrapper[4753]: I0129 12:28:19.831629 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/74a6966a-a456-4920-89a6-a231ed9bb1a7-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "74a6966a-a456-4920-89a6-a231ed9bb1a7" (UID: "74a6966a-a456-4920-89a6-a231ed9bb1a7"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:28:19 crc kubenswrapper[4753]: I0129 12:28:19.931549 4753 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/74a6966a-a456-4920-89a6-a231ed9bb1a7-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 29 12:28:20 crc kubenswrapper[4753]: I0129 12:28:20.400873 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-ring-rebalance-rl5q8" event={"ID":"74a6966a-a456-4920-89a6-a231ed9bb1a7","Type":"ContainerDied","Data":"c940dbe48d2af07ecf94678273be75f84ee8c5fe2c8ce6e53830bbf33bcf0d06"} Jan 29 12:28:20 crc kubenswrapper[4753]: I0129 12:28:20.400921 4753 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c940dbe48d2af07ecf94678273be75f84ee8c5fe2c8ce6e53830bbf33bcf0d06" Jan 29 12:28:20 crc kubenswrapper[4753]: I0129 12:28:20.400988 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-rl5q8" Jan 29 12:28:20 crc kubenswrapper[4753]: I0129 12:28:20.662472 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-debug-lkhcw"] Jan 29 12:28:20 crc kubenswrapper[4753]: E0129 12:28:20.662828 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74a6966a-a456-4920-89a6-a231ed9bb1a7" containerName="swift-ring-rebalance" Jan 29 12:28:20 crc kubenswrapper[4753]: I0129 12:28:20.662841 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="74a6966a-a456-4920-89a6-a231ed9bb1a7" containerName="swift-ring-rebalance" Jan 29 12:28:20 crc kubenswrapper[4753]: I0129 12:28:20.663009 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="74a6966a-a456-4920-89a6-a231ed9bb1a7" containerName="swift-ring-rebalance" Jan 29 12:28:20 crc kubenswrapper[4753]: I0129 12:28:20.663568 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-debug-lkhcw" Jan 29 12:28:20 crc kubenswrapper[4753]: I0129 12:28:20.667107 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"swift-kuttl-tests"/"swift-ring-config-data" Jan 29 12:28:20 crc kubenswrapper[4753]: I0129 12:28:20.667141 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"swift-kuttl-tests"/"swift-ring-scripts" Jan 29 12:28:20 crc kubenswrapper[4753]: I0129 12:28:20.683118 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-debug-lkhcw"] Jan 29 12:28:20 crc kubenswrapper[4753]: I0129 12:28:20.853249 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/31e114dd-78cd-4a76-a238-daa4cbe2166a-swiftconf\") pod \"swift-ring-rebalance-debug-lkhcw\" (UID: \"31e114dd-78cd-4a76-a238-daa4cbe2166a\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-lkhcw" Jan 29 12:28:20 crc kubenswrapper[4753]: I0129 12:28:20.853311 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/31e114dd-78cd-4a76-a238-daa4cbe2166a-ring-data-devices\") pod \"swift-ring-rebalance-debug-lkhcw\" (UID: \"31e114dd-78cd-4a76-a238-daa4cbe2166a\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-lkhcw" Jan 29 12:28:20 crc kubenswrapper[4753]: I0129 12:28:20.853355 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/31e114dd-78cd-4a76-a238-daa4cbe2166a-etc-swift\") pod \"swift-ring-rebalance-debug-lkhcw\" (UID: \"31e114dd-78cd-4a76-a238-daa4cbe2166a\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-lkhcw" Jan 29 12:28:20 crc kubenswrapper[4753]: I0129 12:28:20.853623 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/31e114dd-78cd-4a76-a238-daa4cbe2166a-dispersionconf\") pod \"swift-ring-rebalance-debug-lkhcw\" (UID: \"31e114dd-78cd-4a76-a238-daa4cbe2166a\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-lkhcw" Jan 29 12:28:20 crc kubenswrapper[4753]: I0129 12:28:20.853791 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/31e114dd-78cd-4a76-a238-daa4cbe2166a-scripts\") pod 
\"swift-ring-rebalance-debug-lkhcw\" (UID: \"31e114dd-78cd-4a76-a238-daa4cbe2166a\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-lkhcw" Jan 29 12:28:20 crc kubenswrapper[4753]: I0129 12:28:20.853854 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-khgwj\" (UniqueName: \"kubernetes.io/projected/31e114dd-78cd-4a76-a238-daa4cbe2166a-kube-api-access-khgwj\") pod \"swift-ring-rebalance-debug-lkhcw\" (UID: \"31e114dd-78cd-4a76-a238-daa4cbe2166a\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-lkhcw" Jan 29 12:28:20 crc kubenswrapper[4753]: I0129 12:28:20.955292 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/31e114dd-78cd-4a76-a238-daa4cbe2166a-dispersionconf\") pod \"swift-ring-rebalance-debug-lkhcw\" (UID: \"31e114dd-78cd-4a76-a238-daa4cbe2166a\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-lkhcw" Jan 29 12:28:20 crc kubenswrapper[4753]: I0129 12:28:20.955402 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/31e114dd-78cd-4a76-a238-daa4cbe2166a-scripts\") pod \"swift-ring-rebalance-debug-lkhcw\" (UID: \"31e114dd-78cd-4a76-a238-daa4cbe2166a\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-lkhcw" Jan 29 12:28:20 crc kubenswrapper[4753]: I0129 12:28:20.955436 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-khgwj\" (UniqueName: \"kubernetes.io/projected/31e114dd-78cd-4a76-a238-daa4cbe2166a-kube-api-access-khgwj\") pod \"swift-ring-rebalance-debug-lkhcw\" (UID: \"31e114dd-78cd-4a76-a238-daa4cbe2166a\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-lkhcw" Jan 29 12:28:20 crc kubenswrapper[4753]: I0129 12:28:20.955481 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/31e114dd-78cd-4a76-a238-daa4cbe2166a-swiftconf\") pod \"swift-ring-rebalance-debug-lkhcw\" (UID: \"31e114dd-78cd-4a76-a238-daa4cbe2166a\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-lkhcw" Jan 29 12:28:20 crc kubenswrapper[4753]: I0129 12:28:20.955712 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/31e114dd-78cd-4a76-a238-daa4cbe2166a-ring-data-devices\") pod \"swift-ring-rebalance-debug-lkhcw\" (UID: \"31e114dd-78cd-4a76-a238-daa4cbe2166a\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-lkhcw" Jan 29 12:28:20 crc kubenswrapper[4753]: I0129 12:28:20.955760 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/31e114dd-78cd-4a76-a238-daa4cbe2166a-etc-swift\") pod \"swift-ring-rebalance-debug-lkhcw\" (UID: \"31e114dd-78cd-4a76-a238-daa4cbe2166a\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-lkhcw" Jan 29 12:28:20 crc kubenswrapper[4753]: I0129 12:28:20.956197 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/31e114dd-78cd-4a76-a238-daa4cbe2166a-etc-swift\") pod \"swift-ring-rebalance-debug-lkhcw\" (UID: \"31e114dd-78cd-4a76-a238-daa4cbe2166a\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-lkhcw" Jan 29 12:28:20 crc kubenswrapper[4753]: I0129 12:28:20.957021 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/31e114dd-78cd-4a76-a238-daa4cbe2166a-scripts\") pod \"swift-ring-rebalance-debug-lkhcw\" (UID: \"31e114dd-78cd-4a76-a238-daa4cbe2166a\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-lkhcw" Jan 29 12:28:20 crc kubenswrapper[4753]: I0129 12:28:20.957300 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/31e114dd-78cd-4a76-a238-daa4cbe2166a-ring-data-devices\") pod \"swift-ring-rebalance-debug-lkhcw\" (UID: \"31e114dd-78cd-4a76-a238-daa4cbe2166a\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-lkhcw" Jan 29 12:28:20 crc kubenswrapper[4753]: I0129 12:28:20.967978 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/31e114dd-78cd-4a76-a238-daa4cbe2166a-dispersionconf\") pod \"swift-ring-rebalance-debug-lkhcw\" (UID: \"31e114dd-78cd-4a76-a238-daa4cbe2166a\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-lkhcw" Jan 29 12:28:20 crc kubenswrapper[4753]: I0129 12:28:20.968337 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/31e114dd-78cd-4a76-a238-daa4cbe2166a-swiftconf\") pod \"swift-ring-rebalance-debug-lkhcw\" (UID: \"31e114dd-78cd-4a76-a238-daa4cbe2166a\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-lkhcw" Jan 29 12:28:20 crc kubenswrapper[4753]: I0129 12:28:20.986192 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-khgwj\" (UniqueName: \"kubernetes.io/projected/31e114dd-78cd-4a76-a238-daa4cbe2166a-kube-api-access-khgwj\") pod \"swift-ring-rebalance-debug-lkhcw\" (UID: \"31e114dd-78cd-4a76-a238-daa4cbe2166a\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-lkhcw" Jan 29 12:28:21 crc kubenswrapper[4753]: I0129 12:28:21.281685 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-debug-lkhcw" Jan 29 12:28:21 crc kubenswrapper[4753]: I0129 12:28:21.697711 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-debug-lkhcw"] Jan 29 12:28:22 crc kubenswrapper[4753]: I0129 12:28:22.417672 4753 generic.go:334] "Generic (PLEG): container finished" podID="31e114dd-78cd-4a76-a238-daa4cbe2166a" containerID="70e4660c73c90c485ebbd783974961ea62fef64df2887bdf96e5a33a1f9884b5" exitCode=0 Jan 29 12:28:22 crc kubenswrapper[4753]: I0129 12:28:22.417980 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-ring-rebalance-debug-lkhcw" event={"ID":"31e114dd-78cd-4a76-a238-daa4cbe2166a","Type":"ContainerDied","Data":"70e4660c73c90c485ebbd783974961ea62fef64df2887bdf96e5a33a1f9884b5"} Jan 29 12:28:22 crc kubenswrapper[4753]: I0129 12:28:22.418014 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-ring-rebalance-debug-lkhcw" event={"ID":"31e114dd-78cd-4a76-a238-daa4cbe2166a","Type":"ContainerStarted","Data":"d7bf047ebd6152aeeab625f6ba9b97786ff506cadda2be81455f8ab997191a49"} Jan 29 12:28:22 crc kubenswrapper[4753]: I0129 12:28:22.462600 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-debug-lkhcw"] Jan 29 12:28:22 crc kubenswrapper[4753]: I0129 12:28:22.472272 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-debug-lkhcw"] Jan 29 12:28:23 crc kubenswrapper[4753]: I0129 12:28:23.724327 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-debug-lkhcw" Jan 29 12:28:23 crc kubenswrapper[4753]: I0129 12:28:23.832443 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/31e114dd-78cd-4a76-a238-daa4cbe2166a-ring-data-devices\") pod \"31e114dd-78cd-4a76-a238-daa4cbe2166a\" (UID: \"31e114dd-78cd-4a76-a238-daa4cbe2166a\") " Jan 29 12:28:23 crc kubenswrapper[4753]: I0129 12:28:23.832899 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/31e114dd-78cd-4a76-a238-daa4cbe2166a-etc-swift\") pod \"31e114dd-78cd-4a76-a238-daa4cbe2166a\" (UID: \"31e114dd-78cd-4a76-a238-daa4cbe2166a\") " Jan 29 12:28:23 crc kubenswrapper[4753]: I0129 12:28:23.832975 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/31e114dd-78cd-4a76-a238-daa4cbe2166a-dispersionconf\") pod \"31e114dd-78cd-4a76-a238-daa4cbe2166a\" (UID: \"31e114dd-78cd-4a76-a238-daa4cbe2166a\") " Jan 29 12:28:23 crc kubenswrapper[4753]: I0129 12:28:23.833004 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-khgwj\" (UniqueName: \"kubernetes.io/projected/31e114dd-78cd-4a76-a238-daa4cbe2166a-kube-api-access-khgwj\") pod \"31e114dd-78cd-4a76-a238-daa4cbe2166a\" (UID: \"31e114dd-78cd-4a76-a238-daa4cbe2166a\") " Jan 29 12:28:23 crc kubenswrapper[4753]: I0129 12:28:23.833082 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/31e114dd-78cd-4a76-a238-daa4cbe2166a-scripts\") pod \"31e114dd-78cd-4a76-a238-daa4cbe2166a\" (UID: \"31e114dd-78cd-4a76-a238-daa4cbe2166a\") " Jan 29 12:28:23 crc kubenswrapper[4753]: I0129 12:28:23.833129 4753 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/31e114dd-78cd-4a76-a238-daa4cbe2166a-swiftconf\") pod \"31e114dd-78cd-4a76-a238-daa4cbe2166a\" (UID: \"31e114dd-78cd-4a76-a238-daa4cbe2166a\") " Jan 29 12:28:23 crc kubenswrapper[4753]: I0129 12:28:23.833448 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31e114dd-78cd-4a76-a238-daa4cbe2166a-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "31e114dd-78cd-4a76-a238-daa4cbe2166a" (UID: "31e114dd-78cd-4a76-a238-daa4cbe2166a"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:28:23 crc kubenswrapper[4753]: I0129 12:28:23.833561 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/31e114dd-78cd-4a76-a238-daa4cbe2166a-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "31e114dd-78cd-4a76-a238-daa4cbe2166a" (UID: "31e114dd-78cd-4a76-a238-daa4cbe2166a"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:28:23 crc kubenswrapper[4753]: I0129 12:28:23.834344 4753 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/31e114dd-78cd-4a76-a238-daa4cbe2166a-ring-data-devices\") on node \"crc\" DevicePath \"\"" Jan 29 12:28:23 crc kubenswrapper[4753]: I0129 12:28:23.834372 4753 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/31e114dd-78cd-4a76-a238-daa4cbe2166a-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 29 12:28:23 crc kubenswrapper[4753]: I0129 12:28:23.909156 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31e114dd-78cd-4a76-a238-daa4cbe2166a-kube-api-access-khgwj" (OuterVolumeSpecName: "kube-api-access-khgwj") pod "31e114dd-78cd-4a76-a238-daa4cbe2166a" (UID: "31e114dd-78cd-4a76-a238-daa4cbe2166a"). InnerVolumeSpecName "kube-api-access-khgwj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:28:23 crc kubenswrapper[4753]: I0129 12:28:23.914499 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31e114dd-78cd-4a76-a238-daa4cbe2166a-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "31e114dd-78cd-4a76-a238-daa4cbe2166a" (UID: "31e114dd-78cd-4a76-a238-daa4cbe2166a"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:28:23 crc kubenswrapper[4753]: I0129 12:28:23.914531 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31e114dd-78cd-4a76-a238-daa4cbe2166a-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "31e114dd-78cd-4a76-a238-daa4cbe2166a" (UID: "31e114dd-78cd-4a76-a238-daa4cbe2166a"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:28:23 crc kubenswrapper[4753]: I0129 12:28:23.921245 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31e114dd-78cd-4a76-a238-daa4cbe2166a-scripts" (OuterVolumeSpecName: "scripts") pod "31e114dd-78cd-4a76-a238-daa4cbe2166a" (UID: "31e114dd-78cd-4a76-a238-daa4cbe2166a"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:28:23 crc kubenswrapper[4753]: I0129 12:28:23.938727 4753 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/31e114dd-78cd-4a76-a238-daa4cbe2166a-dispersionconf\") on node \"crc\" DevicePath \"\"" Jan 29 12:28:23 crc kubenswrapper[4753]: I0129 12:28:23.939037 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-khgwj\" (UniqueName: \"kubernetes.io/projected/31e114dd-78cd-4a76-a238-daa4cbe2166a-kube-api-access-khgwj\") on node \"crc\" DevicePath \"\"" Jan 29 12:28:23 crc kubenswrapper[4753]: I0129 12:28:23.939334 4753 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/31e114dd-78cd-4a76-a238-daa4cbe2166a-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:28:23 crc kubenswrapper[4753]: I0129 12:28:23.939426 4753 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/31e114dd-78cd-4a76-a238-daa4cbe2166a-swiftconf\") on node \"crc\" DevicePath \"\"" Jan 29 12:28:23 crc kubenswrapper[4753]: I0129 12:28:23.968178 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-debug-ch5l8"] Jan 29 12:28:23 crc kubenswrapper[4753]: E0129 12:28:23.968591 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31e114dd-78cd-4a76-a238-daa4cbe2166a" containerName="swift-ring-rebalance" Jan 29 12:28:23 crc kubenswrapper[4753]: I0129 12:28:23.968611 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="31e114dd-78cd-4a76-a238-daa4cbe2166a" containerName="swift-ring-rebalance" Jan 29 12:28:23 crc kubenswrapper[4753]: I0129 12:28:23.968829 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="31e114dd-78cd-4a76-a238-daa4cbe2166a" containerName="swift-ring-rebalance" Jan 29 12:28:23 crc kubenswrapper[4753]: I0129 12:28:23.969434 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-debug-ch5l8" Jan 29 12:28:23 crc kubenswrapper[4753]: I0129 12:28:23.982067 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-debug-ch5l8"] Jan 29 12:28:24 crc kubenswrapper[4753]: I0129 12:28:24.041256 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/cf9cdf86-3873-41b5-be7c-2aeae3201b49-swiftconf\") pod \"swift-ring-rebalance-debug-ch5l8\" (UID: \"cf9cdf86-3873-41b5-be7c-2aeae3201b49\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-ch5l8" Jan 29 12:28:24 crc kubenswrapper[4753]: I0129 12:28:24.041345 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cf9cdf86-3873-41b5-be7c-2aeae3201b49-scripts\") pod \"swift-ring-rebalance-debug-ch5l8\" (UID: \"cf9cdf86-3873-41b5-be7c-2aeae3201b49\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-ch5l8" Jan 29 12:28:24 crc kubenswrapper[4753]: I0129 12:28:24.041387 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4d7kd\" (UniqueName: \"kubernetes.io/projected/cf9cdf86-3873-41b5-be7c-2aeae3201b49-kube-api-access-4d7kd\") pod \"swift-ring-rebalance-debug-ch5l8\" (UID: \"cf9cdf86-3873-41b5-be7c-2aeae3201b49\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-ch5l8" Jan 29 12:28:24 crc kubenswrapper[4753]: I0129 12:28:24.041420 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/cf9cdf86-3873-41b5-be7c-2aeae3201b49-dispersionconf\") pod \"swift-ring-rebalance-debug-ch5l8\" (UID: \"cf9cdf86-3873-41b5-be7c-2aeae3201b49\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-ch5l8" Jan 29 12:28:24 crc kubenswrapper[4753]: I0129 12:28:24.041665 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/cf9cdf86-3873-41b5-be7c-2aeae3201b49-etc-swift\") pod \"swift-ring-rebalance-debug-ch5l8\" (UID: \"cf9cdf86-3873-41b5-be7c-2aeae3201b49\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-ch5l8" Jan 29 12:28:24 crc kubenswrapper[4753]: I0129 12:28:24.041864 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/cf9cdf86-3873-41b5-be7c-2aeae3201b49-ring-data-devices\") pod \"swift-ring-rebalance-debug-ch5l8\" (UID: \"cf9cdf86-3873-41b5-be7c-2aeae3201b49\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-ch5l8" Jan 29 12:28:24 crc kubenswrapper[4753]: I0129 12:28:24.143885 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/cf9cdf86-3873-41b5-be7c-2aeae3201b49-etc-swift\") pod \"swift-ring-rebalance-debug-ch5l8\" (UID: \"cf9cdf86-3873-41b5-be7c-2aeae3201b49\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-ch5l8" Jan 29 12:28:24 crc kubenswrapper[4753]: I0129 12:28:24.143997 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/cf9cdf86-3873-41b5-be7c-2aeae3201b49-ring-data-devices\") pod \"swift-ring-rebalance-debug-ch5l8\" (UID: \"cf9cdf86-3873-41b5-be7c-2aeae3201b49\") " 
pod="swift-kuttl-tests/swift-ring-rebalance-debug-ch5l8" Jan 29 12:28:24 crc kubenswrapper[4753]: I0129 12:28:24.144038 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/cf9cdf86-3873-41b5-be7c-2aeae3201b49-swiftconf\") pod \"swift-ring-rebalance-debug-ch5l8\" (UID: \"cf9cdf86-3873-41b5-be7c-2aeae3201b49\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-ch5l8" Jan 29 12:28:24 crc kubenswrapper[4753]: I0129 12:28:24.144120 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cf9cdf86-3873-41b5-be7c-2aeae3201b49-scripts\") pod \"swift-ring-rebalance-debug-ch5l8\" (UID: \"cf9cdf86-3873-41b5-be7c-2aeae3201b49\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-ch5l8" Jan 29 12:28:24 crc kubenswrapper[4753]: I0129 12:28:24.144178 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4d7kd\" (UniqueName: \"kubernetes.io/projected/cf9cdf86-3873-41b5-be7c-2aeae3201b49-kube-api-access-4d7kd\") pod \"swift-ring-rebalance-debug-ch5l8\" (UID: \"cf9cdf86-3873-41b5-be7c-2aeae3201b49\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-ch5l8" Jan 29 12:28:24 crc kubenswrapper[4753]: I0129 12:28:24.144272 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/cf9cdf86-3873-41b5-be7c-2aeae3201b49-dispersionconf\") pod \"swift-ring-rebalance-debug-ch5l8\" (UID: \"cf9cdf86-3873-41b5-be7c-2aeae3201b49\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-ch5l8" Jan 29 12:28:24 crc kubenswrapper[4753]: I0129 12:28:24.144893 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/cf9cdf86-3873-41b5-be7c-2aeae3201b49-etc-swift\") pod \"swift-ring-rebalance-debug-ch5l8\" (UID: \"cf9cdf86-3873-41b5-be7c-2aeae3201b49\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-ch5l8" Jan 29 12:28:24 crc kubenswrapper[4753]: I0129 12:28:24.145792 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/cf9cdf86-3873-41b5-be7c-2aeae3201b49-ring-data-devices\") pod \"swift-ring-rebalance-debug-ch5l8\" (UID: \"cf9cdf86-3873-41b5-be7c-2aeae3201b49\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-ch5l8" Jan 29 12:28:24 crc kubenswrapper[4753]: I0129 12:28:24.146274 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cf9cdf86-3873-41b5-be7c-2aeae3201b49-scripts\") pod \"swift-ring-rebalance-debug-ch5l8\" (UID: \"cf9cdf86-3873-41b5-be7c-2aeae3201b49\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-ch5l8" Jan 29 12:28:24 crc kubenswrapper[4753]: I0129 12:28:24.149584 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/cf9cdf86-3873-41b5-be7c-2aeae3201b49-dispersionconf\") pod \"swift-ring-rebalance-debug-ch5l8\" (UID: \"cf9cdf86-3873-41b5-be7c-2aeae3201b49\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-ch5l8" Jan 29 12:28:24 crc kubenswrapper[4753]: I0129 12:28:24.150687 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/cf9cdf86-3873-41b5-be7c-2aeae3201b49-swiftconf\") pod \"swift-ring-rebalance-debug-ch5l8\" (UID: \"cf9cdf86-3873-41b5-be7c-2aeae3201b49\") " 
pod="swift-kuttl-tests/swift-ring-rebalance-debug-ch5l8" Jan 29 12:28:24 crc kubenswrapper[4753]: I0129 12:28:24.166373 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4d7kd\" (UniqueName: \"kubernetes.io/projected/cf9cdf86-3873-41b5-be7c-2aeae3201b49-kube-api-access-4d7kd\") pod \"swift-ring-rebalance-debug-ch5l8\" (UID: \"cf9cdf86-3873-41b5-be7c-2aeae3201b49\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-ch5l8" Jan 29 12:28:24 crc kubenswrapper[4753]: I0129 12:28:24.297310 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-debug-ch5l8" Jan 29 12:28:24 crc kubenswrapper[4753]: I0129 12:28:24.448935 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-debug-lkhcw" Jan 29 12:28:24 crc kubenswrapper[4753]: I0129 12:28:24.449136 4753 scope.go:117] "RemoveContainer" containerID="70e4660c73c90c485ebbd783974961ea62fef64df2887bdf96e5a33a1f9884b5" Jan 29 12:28:24 crc kubenswrapper[4753]: I0129 12:28:24.537033 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-debug-ch5l8"] Jan 29 12:28:25 crc kubenswrapper[4753]: I0129 12:28:25.457239 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-ring-rebalance-debug-ch5l8" event={"ID":"cf9cdf86-3873-41b5-be7c-2aeae3201b49","Type":"ContainerStarted","Data":"1d5cdc8f08ae8466fb784c47f39fc69136e4c211b8b0c851aa095272b8270acb"} Jan 29 12:28:25 crc kubenswrapper[4753]: I0129 12:28:25.457573 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-ring-rebalance-debug-ch5l8" event={"ID":"cf9cdf86-3873-41b5-be7c-2aeae3201b49","Type":"ContainerStarted","Data":"345c7a9b3b4ffad160c10684a72a6ea92f97b54a6d4dba838cc207676db5573f"} Jan 29 12:28:25 crc kubenswrapper[4753]: I0129 12:28:25.479829 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="swift-kuttl-tests/swift-ring-rebalance-debug-ch5l8" podStartSLOduration=2.47980616 podStartE2EDuration="2.47980616s" podCreationTimestamp="2026-01-29 12:28:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:28:25.475880169 +0000 UTC m=+1319.727961674" watchObservedRunningTime="2026-01-29 12:28:25.47980616 +0000 UTC m=+1319.731887655" Jan 29 12:28:25 crc kubenswrapper[4753]: I0129 12:28:25.898174 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31e114dd-78cd-4a76-a238-daa4cbe2166a" path="/var/lib/kubelet/pods/31e114dd-78cd-4a76-a238-daa4cbe2166a/volumes" Jan 29 12:28:26 crc kubenswrapper[4753]: I0129 12:28:26.468434 4753 generic.go:334] "Generic (PLEG): container finished" podID="cf9cdf86-3873-41b5-be7c-2aeae3201b49" containerID="1d5cdc8f08ae8466fb784c47f39fc69136e4c211b8b0c851aa095272b8270acb" exitCode=0 Jan 29 12:28:26 crc kubenswrapper[4753]: I0129 12:28:26.468484 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-ring-rebalance-debug-ch5l8" event={"ID":"cf9cdf86-3873-41b5-be7c-2aeae3201b49","Type":"ContainerDied","Data":"1d5cdc8f08ae8466fb784c47f39fc69136e4c211b8b0c851aa095272b8270acb"} Jan 29 12:28:27 crc kubenswrapper[4753]: I0129 12:28:27.815274 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-debug-ch5l8" Jan 29 12:28:27 crc kubenswrapper[4753]: I0129 12:28:27.842062 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/cf9cdf86-3873-41b5-be7c-2aeae3201b49-ring-data-devices\") pod \"cf9cdf86-3873-41b5-be7c-2aeae3201b49\" (UID: \"cf9cdf86-3873-41b5-be7c-2aeae3201b49\") " Jan 29 12:28:27 crc kubenswrapper[4753]: I0129 12:28:27.842147 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cf9cdf86-3873-41b5-be7c-2aeae3201b49-scripts\") pod \"cf9cdf86-3873-41b5-be7c-2aeae3201b49\" (UID: \"cf9cdf86-3873-41b5-be7c-2aeae3201b49\") " Jan 29 12:28:27 crc kubenswrapper[4753]: I0129 12:28:27.842174 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/cf9cdf86-3873-41b5-be7c-2aeae3201b49-dispersionconf\") pod \"cf9cdf86-3873-41b5-be7c-2aeae3201b49\" (UID: \"cf9cdf86-3873-41b5-be7c-2aeae3201b49\") " Jan 29 12:28:27 crc kubenswrapper[4753]: I0129 12:28:27.842201 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d7kd\" (UniqueName: \"kubernetes.io/projected/cf9cdf86-3873-41b5-be7c-2aeae3201b49-kube-api-access-4d7kd\") pod \"cf9cdf86-3873-41b5-be7c-2aeae3201b49\" (UID: \"cf9cdf86-3873-41b5-be7c-2aeae3201b49\") " Jan 29 12:28:27 crc kubenswrapper[4753]: I0129 12:28:27.842255 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/cf9cdf86-3873-41b5-be7c-2aeae3201b49-swiftconf\") pod \"cf9cdf86-3873-41b5-be7c-2aeae3201b49\" (UID: \"cf9cdf86-3873-41b5-be7c-2aeae3201b49\") " Jan 29 12:28:27 crc kubenswrapper[4753]: I0129 12:28:27.842326 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/cf9cdf86-3873-41b5-be7c-2aeae3201b49-etc-swift\") pod \"cf9cdf86-3873-41b5-be7c-2aeae3201b49\" (UID: \"cf9cdf86-3873-41b5-be7c-2aeae3201b49\") " Jan 29 12:28:27 crc kubenswrapper[4753]: I0129 12:28:27.842621 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cf9cdf86-3873-41b5-be7c-2aeae3201b49-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "cf9cdf86-3873-41b5-be7c-2aeae3201b49" (UID: "cf9cdf86-3873-41b5-be7c-2aeae3201b49"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:28:27 crc kubenswrapper[4753]: I0129 12:28:27.844094 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cf9cdf86-3873-41b5-be7c-2aeae3201b49-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "cf9cdf86-3873-41b5-be7c-2aeae3201b49" (UID: "cf9cdf86-3873-41b5-be7c-2aeae3201b49"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:28:27 crc kubenswrapper[4753]: I0129 12:28:27.849148 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf9cdf86-3873-41b5-be7c-2aeae3201b49-kube-api-access-4d7kd" (OuterVolumeSpecName: "kube-api-access-4d7kd") pod "cf9cdf86-3873-41b5-be7c-2aeae3201b49" (UID: "cf9cdf86-3873-41b5-be7c-2aeae3201b49"). InnerVolumeSpecName "kube-api-access-4d7kd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:28:27 crc kubenswrapper[4753]: I0129 12:28:27.862129 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-debug-ch5l8"] Jan 29 12:28:27 crc kubenswrapper[4753]: I0129 12:28:27.867780 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf9cdf86-3873-41b5-be7c-2aeae3201b49-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "cf9cdf86-3873-41b5-be7c-2aeae3201b49" (UID: "cf9cdf86-3873-41b5-be7c-2aeae3201b49"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:28:27 crc kubenswrapper[4753]: I0129 12:28:27.868770 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cf9cdf86-3873-41b5-be7c-2aeae3201b49-scripts" (OuterVolumeSpecName: "scripts") pod "cf9cdf86-3873-41b5-be7c-2aeae3201b49" (UID: "cf9cdf86-3873-41b5-be7c-2aeae3201b49"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:28:27 crc kubenswrapper[4753]: I0129 12:28:27.870099 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-debug-ch5l8"] Jan 29 12:28:27 crc kubenswrapper[4753]: I0129 12:28:27.876444 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf9cdf86-3873-41b5-be7c-2aeae3201b49-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "cf9cdf86-3873-41b5-be7c-2aeae3201b49" (UID: "cf9cdf86-3873-41b5-be7c-2aeae3201b49"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:28:27 crc kubenswrapper[4753]: I0129 12:28:27.905516 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cf9cdf86-3873-41b5-be7c-2aeae3201b49" path="/var/lib/kubelet/pods/cf9cdf86-3873-41b5-be7c-2aeae3201b49/volumes" Jan 29 12:28:27 crc kubenswrapper[4753]: I0129 12:28:27.945723 4753 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/cf9cdf86-3873-41b5-be7c-2aeae3201b49-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 29 12:28:27 crc kubenswrapper[4753]: I0129 12:28:27.945762 4753 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/cf9cdf86-3873-41b5-be7c-2aeae3201b49-ring-data-devices\") on node \"crc\" DevicePath \"\"" Jan 29 12:28:27 crc kubenswrapper[4753]: I0129 12:28:27.945774 4753 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cf9cdf86-3873-41b5-be7c-2aeae3201b49-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:28:27 crc kubenswrapper[4753]: I0129 12:28:27.945783 4753 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/cf9cdf86-3873-41b5-be7c-2aeae3201b49-dispersionconf\") on node \"crc\" DevicePath \"\"" Jan 29 12:28:27 crc kubenswrapper[4753]: I0129 12:28:27.945792 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d7kd\" (UniqueName: \"kubernetes.io/projected/cf9cdf86-3873-41b5-be7c-2aeae3201b49-kube-api-access-4d7kd\") on node \"crc\" DevicePath \"\"" Jan 29 12:28:27 crc kubenswrapper[4753]: I0129 12:28:27.945802 4753 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/cf9cdf86-3873-41b5-be7c-2aeae3201b49-swiftconf\") on node \"crc\" DevicePath \"\"" Jan 29 12:28:27 crc 
Jan 29 12:28:27 crc kubenswrapper[4753]: I0129 12:28:27.981934 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/swift-storage-0"]
Jan 29 12:28:27 crc kubenswrapper[4753]: I0129 12:28:27.982907 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="account-server" containerID="cri-o://3726b25caf518cbf35e2f777e83e1ecfee7780f3edbad81b396ea283f5da3a4a" gracePeriod=30
Jan 29 12:28:27 crc kubenswrapper[4753]: I0129 12:28:27.983557 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="swift-recon-cron" containerID="cri-o://e7da666c1e8d2b0689ffa24259c62b01b913567e00d5b3ed3b19dd77e0c6c533" gracePeriod=30
Jan 29 12:28:27 crc kubenswrapper[4753]: I0129 12:28:27.983752 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="rsync" containerID="cri-o://955915dd25543e1efdb915a3879096fba3d38186fe0b1844ff742a04165dbcab" gracePeriod=30
Jan 29 12:28:27 crc kubenswrapper[4753]: I0129 12:28:27.983930 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="object-expirer" containerID="cri-o://60fba7ddfb790e3ab32a5fcb0f5930bccf040407ee1755fd70b5ef489d123112" gracePeriod=30
Jan 29 12:28:27 crc kubenswrapper[4753]: I0129 12:28:27.984092 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="object-updater" containerID="cri-o://896609a3bbf20ce8ed0c2121726b125b6eb828e48157cc650e81099744c07e6a" gracePeriod=30
Jan 29 12:28:27 crc kubenswrapper[4753]: I0129 12:28:27.984258 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="object-auditor" containerID="cri-o://b5dbc9e432cab22467e73f936a4bd9ed835e89758f8d7b9248aa062527139167" gracePeriod=30
Jan 29 12:28:27 crc kubenswrapper[4753]: I0129 12:28:27.984405 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="object-replicator" containerID="cri-o://f600c9d18c7bef57d4cdc5e5e4f2ef9ad5b634df5b0c23fc5d34c56cbc0abc7c" gracePeriod=30
Jan 29 12:28:27 crc kubenswrapper[4753]: I0129 12:28:27.984572 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="object-server" containerID="cri-o://f512c2300a33e2cb5b259c3418f458b2d6f86634ccce39b9adee884ae693a404" gracePeriod=30
Jan 29 12:28:27 crc kubenswrapper[4753]: I0129 12:28:27.984732 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="container-updater" containerID="cri-o://737616607dae86265208ff6f145edb0ce8b20dba96eda0daf445337b0fef1935" gracePeriod=30
Jan 29 12:28:27 crc kubenswrapper[4753]: I0129 12:28:27.984866 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="container-auditor" containerID="cri-o://75aca0fed29877eb2ade05283d8faf4a4c3f794d44c85614b217f1bf2ae5b2ce" gracePeriod=30
Jan 29 12:28:27 crc kubenswrapper[4753]: I0129 12:28:27.984995 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="container-replicator" containerID="cri-o://76c9500d039ba3a69ffb165677dd2e12bdadadeb261817ebd10496ce731615ce" gracePeriod=30
Jan 29 12:28:27 crc kubenswrapper[4753]: I0129 12:28:27.985124 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="container-server" containerID="cri-o://167d4dbddcf9e32fccd34fe86b69996a5f6c260ba7e291f34f64d24b772b8ac8" gracePeriod=30
Jan 29 12:28:27 crc kubenswrapper[4753]: I0129 12:28:27.985280 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="account-reaper" containerID="cri-o://984e7ec23fbb1ea4d65de4f32027ca929ec222f93bf54d3a02596d29a4717385" gracePeriod=30
Jan 29 12:28:27 crc kubenswrapper[4753]: I0129 12:28:27.985442 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="account-auditor" containerID="cri-o://063062d207c79c3b7f996082ef0d8f966b55a5285627a39041e569d21584423f" gracePeriod=30
Jan 29 12:28:27 crc kubenswrapper[4753]: I0129 12:28:27.985584 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="account-replicator" containerID="cri-o://af0f9b6fe749f96595fa413d19eb4d05d225aa607b0bdca7f6249d9c3c82af97" gracePeriod=30
Jan 29 12:28:27 crc kubenswrapper[4753]: I0129 12:28:27.993437 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/swift-storage-1"]
Jan 29 12:28:27 crc kubenswrapper[4753]: I0129 12:28:27.995902 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-1" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="account-server" containerID="cri-o://e2f1954c5859377fa2807abc43754082b700caff19ed4069e8ed7c2b84149b60" gracePeriod=30
Jan 29 12:28:27 crc kubenswrapper[4753]: I0129 12:28:27.996104 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-1" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="object-server" containerID="cri-o://78e57805ad2975127bfcf29509b23f4d1294dcfd0848150a2111e6b61c4ddfba" gracePeriod=30
Jan 29 12:28:27 crc kubenswrapper[4753]: I0129 12:28:27.996145 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-1" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="swift-recon-cron" containerID="cri-o://8e356a88be616efe8f74f65c56281ee6e48af8b76b2a36f9cf9de679f1c7b6b8" gracePeriod=30
Jan 29 12:28:27 crc kubenswrapper[4753]: I0129 12:28:27.996163 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-1" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="rsync" containerID="cri-o://40e6b5dc9cea06540198cf963e5c70e91ace9da403911908c871f752fe076bc2" gracePeriod=30
Jan 29 12:28:27 crc kubenswrapper[4753]: I0129 12:28:27.996172 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-1" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="object-expirer" containerID="cri-o://a60f6b51faf65756a3c0471962b2bdf525b7e3029c0357dd153d2da4daa57b8a" gracePeriod=30
Jan 29 12:28:27 crc kubenswrapper[4753]: I0129 12:28:27.996183 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-1" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="object-updater" containerID="cri-o://13d565afdd53734d519d75262cc241dad905071cb7825483fd615878fa346029" gracePeriod=30
Jan 29 12:28:27 crc kubenswrapper[4753]: I0129 12:28:27.996193 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-1" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="object-auditor" containerID="cri-o://74e579bb81dd2d0a90b87f4e8962a16fb2e6352c7384af62591ce4ac200cc268" gracePeriod=30
Jan 29 12:28:27 crc kubenswrapper[4753]: I0129 12:28:27.996203 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-1" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="object-replicator" containerID="cri-o://7a17416ceba41ee4beed6c5cd6e769268f0fb17d13e4cf402b2974089c52b3c5" gracePeriod=30
Jan 29 12:28:27 crc kubenswrapper[4753]: I0129 12:28:27.996217 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-1" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="container-server" containerID="cri-o://ab8ab1b0296c8d63c94fe76b9be4dff85e2c3d3c5e119f4c61a4e952a3eacb57" gracePeriod=30
Jan 29 12:28:27 crc kubenswrapper[4753]: I0129 12:28:27.996245 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-1" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="container-updater" containerID="cri-o://e35b583e269bd87ef2536a7423d7e23d108405a6dac8032ce67f1432845ac9ee" gracePeriod=30
Jan 29 12:28:27 crc kubenswrapper[4753]: I0129 12:28:27.996259 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-1" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="container-auditor" containerID="cri-o://39cbef45410dc33c3ccd280dd8dbd93028f01d71d87119a2009d38d2fab5647f" gracePeriod=30
Jan 29 12:28:27 crc kubenswrapper[4753]: I0129 12:28:27.996269 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-1" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="container-replicator" containerID="cri-o://c2c67d359cc4c35fc1b93d25f31dd1acbe50d819456e7ba45edbca5cc66b76ad" gracePeriod=30
Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:27.996280 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-1" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="account-auditor" containerID="cri-o://8066888e8c4974033e42ad712726fb89b198041760f9941ccf400414fe67f7c4" gracePeriod=30
Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:27.996292 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-1" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="account-reaper" containerID="cri-o://c02e393358bd7fc72cbcc938357a001d8e45a3614c97f8c21ef2b425a11ea56c" gracePeriod=30
containerID="cri-o://c02e393358bd7fc72cbcc938357a001d8e45a3614c97f8c21ef2b425a11ea56c" gracePeriod=30 Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:27.996303 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-1" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="account-replicator" containerID="cri-o://62808980bc02e825395831219271d7565dbce4f504194b0b1d24f3ec68d29c7a" gracePeriod=30 Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.011676 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/swift-storage-2"] Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.012218 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-2" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="account-server" containerID="cri-o://fe8f9ebcc1c86411e3bb093c67fb21d93a96fee55951ceee7e3131efeb068618" gracePeriod=30 Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.012386 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-2" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="swift-recon-cron" containerID="cri-o://089bfd5f212edcb63517b6be2c9e93e85788d0a36322b33d159dc4276eeffa8c" gracePeriod=30 Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.012442 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-2" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="rsync" containerID="cri-o://263c42f493bd95e422bf98dec8882fa3d3524e56d9ce4bc9f9ecf43ceb8c3a2e" gracePeriod=30 Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.012487 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-2" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="object-expirer" containerID="cri-o://e886a1fa21080f1c391ce8ba831cdb18c9181775550b92bbd69711a723a6c2ca" gracePeriod=30 Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.012537 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-2" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="container-replicator" containerID="cri-o://7205dfe02ab43d1f70f31ef2ec36df92f3b6ddfe4b58c079fa1bef08461d8f7d" gracePeriod=30 Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.012582 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-2" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="container-server" containerID="cri-o://00c1eb87259c9f9ca790410d154830681a972b41be2f543d941b16c768d02c7a" gracePeriod=30 Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.012574 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-2" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="container-auditor" containerID="cri-o://c61d771f239c1ddb78ae193005b7f28411347094488416d74c36b5a4e1af184e" gracePeriod=30 Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.012642 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-2" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="account-auditor" containerID="cri-o://0fdf7c43e207058700148694e8d0512172a5c08367d82685ee34cb21a7d777bb" gracePeriod=30 Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.012677 4753 
kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-2" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="account-replicator" containerID="cri-o://ffb8afe70efce4899f28e6a84a2da82ec2152af28d185ef2246910c3630e20af" gracePeriod=30 Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.012627 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-2" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="account-reaper" containerID="cri-o://4be3d87956e4951b9b943798ec79dccb05d39a88a8d1c684b6af2f4d1d49dd8a" gracePeriod=30 Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.012815 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-2" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="object-updater" containerID="cri-o://ff839774eea656b737782f6cc51d5decbaa50e4fcaeed6ad6d8c0f31282f312e" gracePeriod=30 Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.012863 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-2" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="object-replicator" containerID="cri-o://8c90ac4f21fbe6fd19be163628758ffd03ac6be361d6e346e128041b97d6eb05" gracePeriod=30 Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.012895 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-2" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="object-server" containerID="cri-o://b601e419b6959550e2118567697a445d92d4c7c854603a5c05ad0a8bd8840b30" gracePeriod=30 Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.012953 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-2" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="container-updater" containerID="cri-o://ff3a6b0d2ffa6545d5a3788e7c7dea23a83af8af68f1a3dbb4abcd33d05b9901" gracePeriod=30 Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.012946 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-2" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="object-auditor" containerID="cri-o://400a061c4bd0a357658de03c8e6bf769bb555b258f4dc874056c3bc75b8d5f62" gracePeriod=30 Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.036155 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-rl5q8"] Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.062190 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-rl5q8"] Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.083538 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/swift-proxy-6bc96d68cf-vw4t2"] Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.083841 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-vw4t2" podUID="1279ea7f-350d-4657-8d79-d384e6974700" containerName="proxy-httpd" containerID="cri-o://f9e68885ed21f11c82942f493d4bec986cbcb9cfc313e2a9c0c92797057fe94f" gracePeriod=30 Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.084408 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-vw4t2" 
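
When the three swift-storage pods and the proxy are deleted, kubelet fans out one "Killing container with a grace period" record per container, each with gracePeriod=30 here. A sketch that tallies the fan-out per pod, using the fields() helper sketched earlier for this log's format; against this capture it yields 15 kills per swift-storage pod and 2 for the proxy:

from collections import Counter

def kill_fanout(records: list[str]) -> Counter:
    """Count 'Killing container with a grace period' records per pod."""
    kills = Counter()
    for rec in records:
        if '"Killing container with a grace period"' in rec:
            kills[fields(rec)["pod"]] += 1
    return kills
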
podUID="1279ea7f-350d-4657-8d79-d384e6974700" containerName="proxy-server" containerID="cri-o://9aa82879a77a70118ab711b5e80d710b9819382b80bdf7b5852721678330b2b2" gracePeriod=30 Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.538551 4753 generic.go:334] "Generic (PLEG): container finished" podID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerID="e886a1fa21080f1c391ce8ba831cdb18c9181775550b92bbd69711a723a6c2ca" exitCode=0 Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.538888 4753 generic.go:334] "Generic (PLEG): container finished" podID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerID="ff839774eea656b737782f6cc51d5decbaa50e4fcaeed6ad6d8c0f31282f312e" exitCode=0 Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.538901 4753 generic.go:334] "Generic (PLEG): container finished" podID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerID="400a061c4bd0a357658de03c8e6bf769bb555b258f4dc874056c3bc75b8d5f62" exitCode=0 Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.538909 4753 generic.go:334] "Generic (PLEG): container finished" podID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerID="8c90ac4f21fbe6fd19be163628758ffd03ac6be361d6e346e128041b97d6eb05" exitCode=0 Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.538917 4753 generic.go:334] "Generic (PLEG): container finished" podID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerID="ff3a6b0d2ffa6545d5a3788e7c7dea23a83af8af68f1a3dbb4abcd33d05b9901" exitCode=0 Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.538926 4753 generic.go:334] "Generic (PLEG): container finished" podID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerID="c61d771f239c1ddb78ae193005b7f28411347094488416d74c36b5a4e1af184e" exitCode=0 Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.538934 4753 generic.go:334] "Generic (PLEG): container finished" podID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerID="7205dfe02ab43d1f70f31ef2ec36df92f3b6ddfe4b58c079fa1bef08461d8f7d" exitCode=0 Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.538942 4753 generic.go:334] "Generic (PLEG): container finished" podID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerID="4be3d87956e4951b9b943798ec79dccb05d39a88a8d1c684b6af2f4d1d49dd8a" exitCode=0 Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.538952 4753 generic.go:334] "Generic (PLEG): container finished" podID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerID="0fdf7c43e207058700148694e8d0512172a5c08367d82685ee34cb21a7d777bb" exitCode=0 Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.538959 4753 generic.go:334] "Generic (PLEG): container finished" podID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerID="ffb8afe70efce4899f28e6a84a2da82ec2152af28d185ef2246910c3630e20af" exitCode=0 Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.539078 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc","Type":"ContainerDied","Data":"e886a1fa21080f1c391ce8ba831cdb18c9181775550b92bbd69711a723a6c2ca"} Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.539114 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc","Type":"ContainerDied","Data":"ff839774eea656b737782f6cc51d5decbaa50e4fcaeed6ad6d8c0f31282f312e"} Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.539129 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" 
event={"ID":"32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc","Type":"ContainerDied","Data":"400a061c4bd0a357658de03c8e6bf769bb555b258f4dc874056c3bc75b8d5f62"} Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.539139 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc","Type":"ContainerDied","Data":"8c90ac4f21fbe6fd19be163628758ffd03ac6be361d6e346e128041b97d6eb05"} Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.539148 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc","Type":"ContainerDied","Data":"ff3a6b0d2ffa6545d5a3788e7c7dea23a83af8af68f1a3dbb4abcd33d05b9901"} Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.539159 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc","Type":"ContainerDied","Data":"c61d771f239c1ddb78ae193005b7f28411347094488416d74c36b5a4e1af184e"} Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.539172 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc","Type":"ContainerDied","Data":"7205dfe02ab43d1f70f31ef2ec36df92f3b6ddfe4b58c079fa1bef08461d8f7d"} Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.539201 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc","Type":"ContainerDied","Data":"4be3d87956e4951b9b943798ec79dccb05d39a88a8d1c684b6af2f4d1d49dd8a"} Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.539211 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc","Type":"ContainerDied","Data":"0fdf7c43e207058700148694e8d0512172a5c08367d82685ee34cb21a7d777bb"} Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.539244 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc","Type":"ContainerDied","Data":"ffb8afe70efce4899f28e6a84a2da82ec2152af28d185ef2246910c3630e20af"} Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.579531 4753 generic.go:334] "Generic (PLEG): container finished" podID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerID="a60f6b51faf65756a3c0471962b2bdf525b7e3029c0357dd153d2da4daa57b8a" exitCode=0 Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.579564 4753 generic.go:334] "Generic (PLEG): container finished" podID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerID="13d565afdd53734d519d75262cc241dad905071cb7825483fd615878fa346029" exitCode=0 Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.579574 4753 generic.go:334] "Generic (PLEG): container finished" podID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerID="74e579bb81dd2d0a90b87f4e8962a16fb2e6352c7384af62591ce4ac200cc268" exitCode=0 Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.579583 4753 generic.go:334] "Generic (PLEG): container finished" podID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerID="7a17416ceba41ee4beed6c5cd6e769268f0fb17d13e4cf402b2974089c52b3c5" exitCode=0 Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.579591 4753 generic.go:334] "Generic (PLEG): container finished" podID="ddeb279d-2025-4326-8e65-865d310f7c0a" 
containerID="e35b583e269bd87ef2536a7423d7e23d108405a6dac8032ce67f1432845ac9ee" exitCode=0 Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.579599 4753 generic.go:334] "Generic (PLEG): container finished" podID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerID="39cbef45410dc33c3ccd280dd8dbd93028f01d71d87119a2009d38d2fab5647f" exitCode=0 Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.579605 4753 generic.go:334] "Generic (PLEG): container finished" podID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerID="c2c67d359cc4c35fc1b93d25f31dd1acbe50d819456e7ba45edbca5cc66b76ad" exitCode=0 Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.579611 4753 generic.go:334] "Generic (PLEG): container finished" podID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerID="c02e393358bd7fc72cbcc938357a001d8e45a3614c97f8c21ef2b425a11ea56c" exitCode=0 Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.579617 4753 generic.go:334] "Generic (PLEG): container finished" podID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerID="8066888e8c4974033e42ad712726fb89b198041760f9941ccf400414fe67f7c4" exitCode=0 Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.579625 4753 generic.go:334] "Generic (PLEG): container finished" podID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerID="62808980bc02e825395831219271d7565dbce4f504194b0b1d24f3ec68d29c7a" exitCode=0 Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.579670 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"ddeb279d-2025-4326-8e65-865d310f7c0a","Type":"ContainerDied","Data":"a60f6b51faf65756a3c0471962b2bdf525b7e3029c0357dd153d2da4daa57b8a"} Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.579701 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"ddeb279d-2025-4326-8e65-865d310f7c0a","Type":"ContainerDied","Data":"13d565afdd53734d519d75262cc241dad905071cb7825483fd615878fa346029"} Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.579723 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"ddeb279d-2025-4326-8e65-865d310f7c0a","Type":"ContainerDied","Data":"74e579bb81dd2d0a90b87f4e8962a16fb2e6352c7384af62591ce4ac200cc268"} Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.579732 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"ddeb279d-2025-4326-8e65-865d310f7c0a","Type":"ContainerDied","Data":"7a17416ceba41ee4beed6c5cd6e769268f0fb17d13e4cf402b2974089c52b3c5"} Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.579741 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"ddeb279d-2025-4326-8e65-865d310f7c0a","Type":"ContainerDied","Data":"e35b583e269bd87ef2536a7423d7e23d108405a6dac8032ce67f1432845ac9ee"} Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.579751 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"ddeb279d-2025-4326-8e65-865d310f7c0a","Type":"ContainerDied","Data":"39cbef45410dc33c3ccd280dd8dbd93028f01d71d87119a2009d38d2fab5647f"} Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.579758 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"ddeb279d-2025-4326-8e65-865d310f7c0a","Type":"ContainerDied","Data":"c2c67d359cc4c35fc1b93d25f31dd1acbe50d819456e7ba45edbca5cc66b76ad"} Jan 29 12:28:28 crc 
kubenswrapper[4753]: I0129 12:28:28.579766 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"ddeb279d-2025-4326-8e65-865d310f7c0a","Type":"ContainerDied","Data":"c02e393358bd7fc72cbcc938357a001d8e45a3614c97f8c21ef2b425a11ea56c"} Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.579774 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"ddeb279d-2025-4326-8e65-865d310f7c0a","Type":"ContainerDied","Data":"8066888e8c4974033e42ad712726fb89b198041760f9941ccf400414fe67f7c4"} Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.579781 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"ddeb279d-2025-4326-8e65-865d310f7c0a","Type":"ContainerDied","Data":"62808980bc02e825395831219271d7565dbce4f504194b0b1d24f3ec68d29c7a"} Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.590424 4753 generic.go:334] "Generic (PLEG): container finished" podID="39385d87-5895-47b3-8fa4-dff5549bca97" containerID="955915dd25543e1efdb915a3879096fba3d38186fe0b1844ff742a04165dbcab" exitCode=0 Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.590460 4753 generic.go:334] "Generic (PLEG): container finished" podID="39385d87-5895-47b3-8fa4-dff5549bca97" containerID="60fba7ddfb790e3ab32a5fcb0f5930bccf040407ee1755fd70b5ef489d123112" exitCode=0 Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.590467 4753 generic.go:334] "Generic (PLEG): container finished" podID="39385d87-5895-47b3-8fa4-dff5549bca97" containerID="896609a3bbf20ce8ed0c2121726b125b6eb828e48157cc650e81099744c07e6a" exitCode=0 Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.590475 4753 generic.go:334] "Generic (PLEG): container finished" podID="39385d87-5895-47b3-8fa4-dff5549bca97" containerID="b5dbc9e432cab22467e73f936a4bd9ed835e89758f8d7b9248aa062527139167" exitCode=0 Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.590482 4753 generic.go:334] "Generic (PLEG): container finished" podID="39385d87-5895-47b3-8fa4-dff5549bca97" containerID="f600c9d18c7bef57d4cdc5e5e4f2ef9ad5b634df5b0c23fc5d34c56cbc0abc7c" exitCode=0 Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.590487 4753 generic.go:334] "Generic (PLEG): container finished" podID="39385d87-5895-47b3-8fa4-dff5549bca97" containerID="737616607dae86265208ff6f145edb0ce8b20dba96eda0daf445337b0fef1935" exitCode=0 Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.590493 4753 generic.go:334] "Generic (PLEG): container finished" podID="39385d87-5895-47b3-8fa4-dff5549bca97" containerID="75aca0fed29877eb2ade05283d8faf4a4c3f794d44c85614b217f1bf2ae5b2ce" exitCode=0 Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.590499 4753 generic.go:334] "Generic (PLEG): container finished" podID="39385d87-5895-47b3-8fa4-dff5549bca97" containerID="76c9500d039ba3a69ffb165677dd2e12bdadadeb261817ebd10496ce731615ce" exitCode=0 Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.590505 4753 generic.go:334] "Generic (PLEG): container finished" podID="39385d87-5895-47b3-8fa4-dff5549bca97" containerID="984e7ec23fbb1ea4d65de4f32027ca929ec222f93bf54d3a02596d29a4717385" exitCode=0 Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.590511 4753 generic.go:334] "Generic (PLEG): container finished" podID="39385d87-5895-47b3-8fa4-dff5549bca97" containerID="063062d207c79c3b7f996082ef0d8f966b55a5285627a39041e569d21584423f" exitCode=0 Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.590518 4753 generic.go:334] "Generic 
(PLEG): container finished" podID="39385d87-5895-47b3-8fa4-dff5549bca97" containerID="af0f9b6fe749f96595fa413d19eb4d05d225aa607b0bdca7f6249d9c3c82af97" exitCode=0 Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.590566 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"39385d87-5895-47b3-8fa4-dff5549bca97","Type":"ContainerDied","Data":"955915dd25543e1efdb915a3879096fba3d38186fe0b1844ff742a04165dbcab"} Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.590598 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"39385d87-5895-47b3-8fa4-dff5549bca97","Type":"ContainerDied","Data":"60fba7ddfb790e3ab32a5fcb0f5930bccf040407ee1755fd70b5ef489d123112"} Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.590610 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"39385d87-5895-47b3-8fa4-dff5549bca97","Type":"ContainerDied","Data":"896609a3bbf20ce8ed0c2121726b125b6eb828e48157cc650e81099744c07e6a"} Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.590618 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"39385d87-5895-47b3-8fa4-dff5549bca97","Type":"ContainerDied","Data":"b5dbc9e432cab22467e73f936a4bd9ed835e89758f8d7b9248aa062527139167"} Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.590626 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"39385d87-5895-47b3-8fa4-dff5549bca97","Type":"ContainerDied","Data":"f600c9d18c7bef57d4cdc5e5e4f2ef9ad5b634df5b0c23fc5d34c56cbc0abc7c"} Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.590634 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"39385d87-5895-47b3-8fa4-dff5549bca97","Type":"ContainerDied","Data":"737616607dae86265208ff6f145edb0ce8b20dba96eda0daf445337b0fef1935"} Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.590643 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"39385d87-5895-47b3-8fa4-dff5549bca97","Type":"ContainerDied","Data":"75aca0fed29877eb2ade05283d8faf4a4c3f794d44c85614b217f1bf2ae5b2ce"} Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.590655 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"39385d87-5895-47b3-8fa4-dff5549bca97","Type":"ContainerDied","Data":"76c9500d039ba3a69ffb165677dd2e12bdadadeb261817ebd10496ce731615ce"} Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.590662 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"39385d87-5895-47b3-8fa4-dff5549bca97","Type":"ContainerDied","Data":"984e7ec23fbb1ea4d65de4f32027ca929ec222f93bf54d3a02596d29a4717385"} Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.590671 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"39385d87-5895-47b3-8fa4-dff5549bca97","Type":"ContainerDied","Data":"063062d207c79c3b7f996082ef0d8f966b55a5285627a39041e569d21584423f"} Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.590678 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"39385d87-5895-47b3-8fa4-dff5549bca97","Type":"ContainerDied","Data":"af0f9b6fe749f96595fa413d19eb4d05d225aa607b0bdca7f6249d9c3c82af97"} Jan 29 
12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.592141 4753 scope.go:117] "RemoveContainer" containerID="1d5cdc8f08ae8466fb784c47f39fc69136e4c211b8b0c851aa095272b8270acb" Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.592350 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-debug-ch5l8" Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.595959 4753 generic.go:334] "Generic (PLEG): container finished" podID="1279ea7f-350d-4657-8d79-d384e6974700" containerID="f9e68885ed21f11c82942f493d4bec986cbcb9cfc313e2a9c0c92797057fe94f" exitCode=0 Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.596003 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-vw4t2" event={"ID":"1279ea7f-350d-4657-8d79-d384e6974700","Type":"ContainerDied","Data":"f9e68885ed21f11c82942f493d4bec986cbcb9cfc313e2a9c0c92797057fe94f"} Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.733255 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-vw4t2" podUID="1279ea7f-350d-4657-8d79-d384e6974700" containerName="proxy-server" probeResult="failure" output="Get \"http://10.217.0.97:8080/healthcheck\": dial tcp 10.217.0.97:8080: connect: connection refused" Jan 29 12:28:28 crc kubenswrapper[4753]: I0129 12:28:28.733635 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-vw4t2" podUID="1279ea7f-350d-4657-8d79-d384e6974700" containerName="proxy-httpd" probeResult="failure" output="Get \"http://10.217.0.97:8080/healthcheck\": dial tcp 10.217.0.97:8080: connect: connection refused" Jan 29 12:28:29 crc kubenswrapper[4753]: I0129 12:28:29.008386 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-vw4t2" Jan 29 12:28:29 crc kubenswrapper[4753]: I0129 12:28:29.165660 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rbtfh\" (UniqueName: \"kubernetes.io/projected/1279ea7f-350d-4657-8d79-d384e6974700-kube-api-access-rbtfh\") pod \"1279ea7f-350d-4657-8d79-d384e6974700\" (UID: \"1279ea7f-350d-4657-8d79-d384e6974700\") " Jan 29 12:28:29 crc kubenswrapper[4753]: I0129 12:28:29.165719 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1279ea7f-350d-4657-8d79-d384e6974700-config-data\") pod \"1279ea7f-350d-4657-8d79-d384e6974700\" (UID: \"1279ea7f-350d-4657-8d79-d384e6974700\") " Jan 29 12:28:29 crc kubenswrapper[4753]: I0129 12:28:29.165795 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1279ea7f-350d-4657-8d79-d384e6974700-etc-swift\") pod \"1279ea7f-350d-4657-8d79-d384e6974700\" (UID: \"1279ea7f-350d-4657-8d79-d384e6974700\") " Jan 29 12:28:29 crc kubenswrapper[4753]: I0129 12:28:29.165936 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1279ea7f-350d-4657-8d79-d384e6974700-log-httpd\") pod \"1279ea7f-350d-4657-8d79-d384e6974700\" (UID: \"1279ea7f-350d-4657-8d79-d384e6974700\") " Jan 29 12:28:29 crc kubenswrapper[4753]: I0129 12:28:29.165979 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1279ea7f-350d-4657-8d79-d384e6974700-run-httpd\") pod \"1279ea7f-350d-4657-8d79-d384e6974700\" (UID: \"1279ea7f-350d-4657-8d79-d384e6974700\") " Jan 29 12:28:29 crc kubenswrapper[4753]: I0129 12:28:29.166610 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1279ea7f-350d-4657-8d79-d384e6974700-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "1279ea7f-350d-4657-8d79-d384e6974700" (UID: "1279ea7f-350d-4657-8d79-d384e6974700"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:28:29 crc kubenswrapper[4753]: I0129 12:28:29.166842 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1279ea7f-350d-4657-8d79-d384e6974700-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "1279ea7f-350d-4657-8d79-d384e6974700" (UID: "1279ea7f-350d-4657-8d79-d384e6974700"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:28:29 crc kubenswrapper[4753]: I0129 12:28:29.171198 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1279ea7f-350d-4657-8d79-d384e6974700-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "1279ea7f-350d-4657-8d79-d384e6974700" (UID: "1279ea7f-350d-4657-8d79-d384e6974700"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:28:29 crc kubenswrapper[4753]: I0129 12:28:29.171309 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1279ea7f-350d-4657-8d79-d384e6974700-kube-api-access-rbtfh" (OuterVolumeSpecName: "kube-api-access-rbtfh") pod "1279ea7f-350d-4657-8d79-d384e6974700" (UID: "1279ea7f-350d-4657-8d79-d384e6974700"). InnerVolumeSpecName "kube-api-access-rbtfh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:28:29 crc kubenswrapper[4753]: I0129 12:28:29.203928 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1279ea7f-350d-4657-8d79-d384e6974700-config-data" (OuterVolumeSpecName: "config-data") pod "1279ea7f-350d-4657-8d79-d384e6974700" (UID: "1279ea7f-350d-4657-8d79-d384e6974700"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:28:29 crc kubenswrapper[4753]: I0129 12:28:29.267811 4753 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1279ea7f-350d-4657-8d79-d384e6974700-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 29 12:28:29 crc kubenswrapper[4753]: I0129 12:28:29.267859 4753 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1279ea7f-350d-4657-8d79-d384e6974700-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 12:28:29 crc kubenswrapper[4753]: I0129 12:28:29.267869 4753 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1279ea7f-350d-4657-8d79-d384e6974700-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 12:28:29 crc kubenswrapper[4753]: I0129 12:28:29.267881 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rbtfh\" (UniqueName: \"kubernetes.io/projected/1279ea7f-350d-4657-8d79-d384e6974700-kube-api-access-rbtfh\") on node \"crc\" DevicePath \"\"" Jan 29 12:28:29 crc kubenswrapper[4753]: I0129 12:28:29.267895 4753 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1279ea7f-350d-4657-8d79-d384e6974700-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 12:28:29 crc kubenswrapper[4753]: I0129 12:28:29.612860 4753 generic.go:334] "Generic (PLEG): container finished" podID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerID="40e6b5dc9cea06540198cf963e5c70e91ace9da403911908c871f752fe076bc2" exitCode=0 Jan 29 12:28:29 crc kubenswrapper[4753]: I0129 12:28:29.612898 4753 generic.go:334] "Generic (PLEG): container finished" podID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerID="78e57805ad2975127bfcf29509b23f4d1294dcfd0848150a2111e6b61c4ddfba" exitCode=0 Jan 29 12:28:29 crc kubenswrapper[4753]: I0129 12:28:29.612911 4753 generic.go:334] "Generic (PLEG): container finished" podID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerID="ab8ab1b0296c8d63c94fe76b9be4dff85e2c3d3c5e119f4c61a4e952a3eacb57" exitCode=0 Jan 29 12:28:29 crc kubenswrapper[4753]: I0129 12:28:29.612924 4753 generic.go:334] "Generic (PLEG): container finished" podID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerID="e2f1954c5859377fa2807abc43754082b700caff19ed4069e8ed7c2b84149b60" exitCode=0 Jan 29 12:28:29 crc kubenswrapper[4753]: I0129 12:28:29.612950 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"ddeb279d-2025-4326-8e65-865d310f7c0a","Type":"ContainerDied","Data":"40e6b5dc9cea06540198cf963e5c70e91ace9da403911908c871f752fe076bc2"} Jan 29 12:28:29 crc kubenswrapper[4753]: I0129 12:28:29.613014 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"ddeb279d-2025-4326-8e65-865d310f7c0a","Type":"ContainerDied","Data":"78e57805ad2975127bfcf29509b23f4d1294dcfd0848150a2111e6b61c4ddfba"} Jan 29 12:28:29 crc kubenswrapper[4753]: I0129 12:28:29.613030 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="swift-kuttl-tests/swift-storage-1" event={"ID":"ddeb279d-2025-4326-8e65-865d310f7c0a","Type":"ContainerDied","Data":"ab8ab1b0296c8d63c94fe76b9be4dff85e2c3d3c5e119f4c61a4e952a3eacb57"} Jan 29 12:28:29 crc kubenswrapper[4753]: I0129 12:28:29.613043 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"ddeb279d-2025-4326-8e65-865d310f7c0a","Type":"ContainerDied","Data":"e2f1954c5859377fa2807abc43754082b700caff19ed4069e8ed7c2b84149b60"} Jan 29 12:28:29 crc kubenswrapper[4753]: I0129 12:28:29.620930 4753 generic.go:334] "Generic (PLEG): container finished" podID="39385d87-5895-47b3-8fa4-dff5549bca97" containerID="f512c2300a33e2cb5b259c3418f458b2d6f86634ccce39b9adee884ae693a404" exitCode=0 Jan 29 12:28:29 crc kubenswrapper[4753]: I0129 12:28:29.620962 4753 generic.go:334] "Generic (PLEG): container finished" podID="39385d87-5895-47b3-8fa4-dff5549bca97" containerID="167d4dbddcf9e32fccd34fe86b69996a5f6c260ba7e291f34f64d24b772b8ac8" exitCode=0 Jan 29 12:28:29 crc kubenswrapper[4753]: I0129 12:28:29.620969 4753 generic.go:334] "Generic (PLEG): container finished" podID="39385d87-5895-47b3-8fa4-dff5549bca97" containerID="3726b25caf518cbf35e2f777e83e1ecfee7780f3edbad81b396ea283f5da3a4a" exitCode=0 Jan 29 12:28:29 crc kubenswrapper[4753]: I0129 12:28:29.620962 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"39385d87-5895-47b3-8fa4-dff5549bca97","Type":"ContainerDied","Data":"f512c2300a33e2cb5b259c3418f458b2d6f86634ccce39b9adee884ae693a404"} Jan 29 12:28:29 crc kubenswrapper[4753]: I0129 12:28:29.621013 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"39385d87-5895-47b3-8fa4-dff5549bca97","Type":"ContainerDied","Data":"167d4dbddcf9e32fccd34fe86b69996a5f6c260ba7e291f34f64d24b772b8ac8"} Jan 29 12:28:29 crc kubenswrapper[4753]: I0129 12:28:29.621025 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"39385d87-5895-47b3-8fa4-dff5549bca97","Type":"ContainerDied","Data":"3726b25caf518cbf35e2f777e83e1ecfee7780f3edbad81b396ea283f5da3a4a"} Jan 29 12:28:29 crc kubenswrapper[4753]: I0129 12:28:29.625818 4753 generic.go:334] "Generic (PLEG): container finished" podID="1279ea7f-350d-4657-8d79-d384e6974700" containerID="9aa82879a77a70118ab711b5e80d710b9819382b80bdf7b5852721678330b2b2" exitCode=0 Jan 29 12:28:29 crc kubenswrapper[4753]: I0129 12:28:29.625983 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-vw4t2" event={"ID":"1279ea7f-350d-4657-8d79-d384e6974700","Type":"ContainerDied","Data":"9aa82879a77a70118ab711b5e80d710b9819382b80bdf7b5852721678330b2b2"} Jan 29 12:28:29 crc kubenswrapper[4753]: I0129 12:28:29.626019 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-vw4t2" event={"ID":"1279ea7f-350d-4657-8d79-d384e6974700","Type":"ContainerDied","Data":"d0e294ae3117d30051d766d0204b85d7e203d7039512163ef3dd7365354ab3ac"} Jan 29 12:28:29 crc kubenswrapper[4753]: I0129 12:28:29.626024 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-vw4t2" Jan 29 12:28:29 crc kubenswrapper[4753]: I0129 12:28:29.626038 4753 scope.go:117] "RemoveContainer" containerID="9aa82879a77a70118ab711b5e80d710b9819382b80bdf7b5852721678330b2b2" Jan 29 12:28:29 crc kubenswrapper[4753]: I0129 12:28:29.636916 4753 generic.go:334] "Generic (PLEG): container finished" podID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerID="263c42f493bd95e422bf98dec8882fa3d3524e56d9ce4bc9f9ecf43ceb8c3a2e" exitCode=0 Jan 29 12:28:29 crc kubenswrapper[4753]: I0129 12:28:29.636952 4753 generic.go:334] "Generic (PLEG): container finished" podID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerID="b601e419b6959550e2118567697a445d92d4c7c854603a5c05ad0a8bd8840b30" exitCode=0 Jan 29 12:28:29 crc kubenswrapper[4753]: I0129 12:28:29.636959 4753 generic.go:334] "Generic (PLEG): container finished" podID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerID="00c1eb87259c9f9ca790410d154830681a972b41be2f543d941b16c768d02c7a" exitCode=0 Jan 29 12:28:29 crc kubenswrapper[4753]: I0129 12:28:29.636966 4753 generic.go:334] "Generic (PLEG): container finished" podID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerID="fe8f9ebcc1c86411e3bb093c67fb21d93a96fee55951ceee7e3131efeb068618" exitCode=0 Jan 29 12:28:29 crc kubenswrapper[4753]: I0129 12:28:29.636991 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc","Type":"ContainerDied","Data":"263c42f493bd95e422bf98dec8882fa3d3524e56d9ce4bc9f9ecf43ceb8c3a2e"} Jan 29 12:28:29 crc kubenswrapper[4753]: I0129 12:28:29.637020 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc","Type":"ContainerDied","Data":"b601e419b6959550e2118567697a445d92d4c7c854603a5c05ad0a8bd8840b30"} Jan 29 12:28:29 crc kubenswrapper[4753]: I0129 12:28:29.637030 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc","Type":"ContainerDied","Data":"00c1eb87259c9f9ca790410d154830681a972b41be2f543d941b16c768d02c7a"} Jan 29 12:28:29 crc kubenswrapper[4753]: I0129 12:28:29.637048 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc","Type":"ContainerDied","Data":"fe8f9ebcc1c86411e3bb093c67fb21d93a96fee55951ceee7e3131efeb068618"} Jan 29 12:28:29 crc kubenswrapper[4753]: I0129 12:28:29.654841 4753 scope.go:117] "RemoveContainer" containerID="f9e68885ed21f11c82942f493d4bec986cbcb9cfc313e2a9c0c92797057fe94f" Jan 29 12:28:29 crc kubenswrapper[4753]: I0129 12:28:29.672060 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/swift-proxy-6bc96d68cf-vw4t2"] Jan 29 12:28:29 crc kubenswrapper[4753]: I0129 12:28:29.681245 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/swift-proxy-6bc96d68cf-vw4t2"] Jan 29 12:28:29 crc kubenswrapper[4753]: I0129 12:28:29.683613 4753 scope.go:117] "RemoveContainer" containerID="9aa82879a77a70118ab711b5e80d710b9819382b80bdf7b5852721678330b2b2" Jan 29 12:28:29 crc kubenswrapper[4753]: E0129 12:28:29.684141 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9aa82879a77a70118ab711b5e80d710b9819382b80bdf7b5852721678330b2b2\": container with ID starting with 
9aa82879a77a70118ab711b5e80d710b9819382b80bdf7b5852721678330b2b2 not found: ID does not exist" containerID="9aa82879a77a70118ab711b5e80d710b9819382b80bdf7b5852721678330b2b2" Jan 29 12:28:29 crc kubenswrapper[4753]: I0129 12:28:29.684184 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9aa82879a77a70118ab711b5e80d710b9819382b80bdf7b5852721678330b2b2"} err="failed to get container status \"9aa82879a77a70118ab711b5e80d710b9819382b80bdf7b5852721678330b2b2\": rpc error: code = NotFound desc = could not find container \"9aa82879a77a70118ab711b5e80d710b9819382b80bdf7b5852721678330b2b2\": container with ID starting with 9aa82879a77a70118ab711b5e80d710b9819382b80bdf7b5852721678330b2b2 not found: ID does not exist" Jan 29 12:28:29 crc kubenswrapper[4753]: I0129 12:28:29.684208 4753 scope.go:117] "RemoveContainer" containerID="f9e68885ed21f11c82942f493d4bec986cbcb9cfc313e2a9c0c92797057fe94f" Jan 29 12:28:29 crc kubenswrapper[4753]: E0129 12:28:29.684594 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f9e68885ed21f11c82942f493d4bec986cbcb9cfc313e2a9c0c92797057fe94f\": container with ID starting with f9e68885ed21f11c82942f493d4bec986cbcb9cfc313e2a9c0c92797057fe94f not found: ID does not exist" containerID="f9e68885ed21f11c82942f493d4bec986cbcb9cfc313e2a9c0c92797057fe94f" Jan 29 12:28:29 crc kubenswrapper[4753]: I0129 12:28:29.684638 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f9e68885ed21f11c82942f493d4bec986cbcb9cfc313e2a9c0c92797057fe94f"} err="failed to get container status \"f9e68885ed21f11c82942f493d4bec986cbcb9cfc313e2a9c0c92797057fe94f\": rpc error: code = NotFound desc = could not find container \"f9e68885ed21f11c82942f493d4bec986cbcb9cfc313e2a9c0c92797057fe94f\": container with ID starting with f9e68885ed21f11c82942f493d4bec986cbcb9cfc313e2a9c0c92797057fe94f not found: ID does not exist" Jan 29 12:28:29 crc kubenswrapper[4753]: I0129 12:28:29.897636 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1279ea7f-350d-4657-8d79-d384e6974700" path="/var/lib/kubelet/pods/1279ea7f-350d-4657-8d79-d384e6974700/volumes" Jan 29 12:28:29 crc kubenswrapper[4753]: I0129 12:28:29.898498 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="74a6966a-a456-4920-89a6-a231ed9bb1a7" path="/var/lib/kubelet/pods/74a6966a-a456-4920-89a6-a231ed9bb1a7/volumes" Jan 29 12:28:58 crc kubenswrapper[4753]: I0129 12:28:58.927840 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:28:58 crc kubenswrapper[4753]: I0129 12:28:58.951741 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/swift-storage-1" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.005253 4753 generic.go:334] "Generic (PLEG): container finished" podID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerID="089bfd5f212edcb63517b6be2c9e93e85788d0a36322b33d159dc4276eeffa8c" exitCode=137 Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.005364 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc","Type":"ContainerDied","Data":"089bfd5f212edcb63517b6be2c9e93e85788d0a36322b33d159dc4276eeffa8c"} Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.012927 4753 generic.go:334] "Generic (PLEG): container finished" podID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerID="8e356a88be616efe8f74f65c56281ee6e48af8b76b2a36f9cf9de679f1c7b6b8" exitCode=137 Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.012975 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"ddeb279d-2025-4326-8e65-865d310f7c0a","Type":"ContainerDied","Data":"8e356a88be616efe8f74f65c56281ee6e48af8b76b2a36f9cf9de679f1c7b6b8"} Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.013061 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"ddeb279d-2025-4326-8e65-865d310f7c0a","Type":"ContainerDied","Data":"fa7b05f6c58595f497027116e691b925777e495abff02bf0eb48128d2f59d397"} Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.013087 4753 scope.go:117] "RemoveContainer" containerID="8e356a88be616efe8f74f65c56281ee6e48af8b76b2a36f9cf9de679f1c7b6b8" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.013429 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/swift-storage-1" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.020568 4753 generic.go:334] "Generic (PLEG): container finished" podID="39385d87-5895-47b3-8fa4-dff5549bca97" containerID="e7da666c1e8d2b0689ffa24259c62b01b913567e00d5b3ed3b19dd77e0c6c533" exitCode=137 Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.020974 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"39385d87-5895-47b3-8fa4-dff5549bca97","Type":"ContainerDied","Data":"e7da666c1e8d2b0689ffa24259c62b01b913567e00d5b3ed3b19dd77e0c6c533"} Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.021010 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"39385d87-5895-47b3-8fa4-dff5549bca97","Type":"ContainerDied","Data":"213c58c1ff01cb6eb2c6a589e4bc52ebd541e63a0293cd2b41fa7f74ca31542d"} Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.021081 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f512c2300a33e2cb5b259c3418f458b2d6f86634ccce39b9adee884ae693a404"} Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.021110 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"737616607dae86265208ff6f145edb0ce8b20dba96eda0daf445337b0fef1935"} Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.021115 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"75aca0fed29877eb2ade05283d8faf4a4c3f794d44c85614b217f1bf2ae5b2ce"} Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.021120 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"76c9500d039ba3a69ffb165677dd2e12bdadadeb261817ebd10496ce731615ce"} Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.021127 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"167d4dbddcf9e32fccd34fe86b69996a5f6c260ba7e291f34f64d24b772b8ac8"} Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.021142 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"984e7ec23fbb1ea4d65de4f32027ca929ec222f93bf54d3a02596d29a4717385"} Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.021147 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"063062d207c79c3b7f996082ef0d8f966b55a5285627a39041e569d21584423f"} Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.021153 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"af0f9b6fe749f96595fa413d19eb4d05d225aa607b0bdca7f6249d9c3c82af97"} Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.021158 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3726b25caf518cbf35e2f777e83e1ecfee7780f3edbad81b396ea283f5da3a4a"} Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.021294 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.043509 4753 scope.go:117] "RemoveContainer" containerID="40e6b5dc9cea06540198cf963e5c70e91ace9da403911908c871f752fe076bc2" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.052703 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-llvjn\" (UniqueName: \"kubernetes.io/projected/39385d87-5895-47b3-8fa4-dff5549bca97-kube-api-access-llvjn\") pod \"39385d87-5895-47b3-8fa4-dff5549bca97\" (UID: \"39385d87-5895-47b3-8fa4-dff5549bca97\") " Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.052786 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/ddeb279d-2025-4326-8e65-865d310f7c0a-cache\") pod \"ddeb279d-2025-4326-8e65-865d310f7c0a\" (UID: \"ddeb279d-2025-4326-8e65-865d310f7c0a\") " Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.052846 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/ddeb279d-2025-4326-8e65-865d310f7c0a-lock\") pod \"ddeb279d-2025-4326-8e65-865d310f7c0a\" (UID: \"ddeb279d-2025-4326-8e65-865d310f7c0a\") " Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.052914 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swift\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"39385d87-5895-47b3-8fa4-dff5549bca97\" (UID: \"39385d87-5895-47b3-8fa4-dff5549bca97\") " Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.052994 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swift\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"ddeb279d-2025-4326-8e65-865d310f7c0a\" (UID: \"ddeb279d-2025-4326-8e65-865d310f7c0a\") " Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.053022 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/39385d87-5895-47b3-8fa4-dff5549bca97-etc-swift\") pod \"39385d87-5895-47b3-8fa4-dff5549bca97\" (UID: \"39385d87-5895-47b3-8fa4-dff5549bca97\") " Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.053118 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qqqqf\" (UniqueName: \"kubernetes.io/projected/ddeb279d-2025-4326-8e65-865d310f7c0a-kube-api-access-qqqqf\") pod \"ddeb279d-2025-4326-8e65-865d310f7c0a\" (UID: \"ddeb279d-2025-4326-8e65-865d310f7c0a\") " Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.053177 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/ddeb279d-2025-4326-8e65-865d310f7c0a-etc-swift\") pod \"ddeb279d-2025-4326-8e65-865d310f7c0a\" (UID: \"ddeb279d-2025-4326-8e65-865d310f7c0a\") " Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.053239 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/39385d87-5895-47b3-8fa4-dff5549bca97-cache\") pod \"39385d87-5895-47b3-8fa4-dff5549bca97\" (UID: \"39385d87-5895-47b3-8fa4-dff5549bca97\") " Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.053286 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/39385d87-5895-47b3-8fa4-dff5549bca97-lock\") pod 
\"39385d87-5895-47b3-8fa4-dff5549bca97\" (UID: \"39385d87-5895-47b3-8fa4-dff5549bca97\") " Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.054767 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/39385d87-5895-47b3-8fa4-dff5549bca97-lock" (OuterVolumeSpecName: "lock") pod "39385d87-5895-47b3-8fa4-dff5549bca97" (UID: "39385d87-5895-47b3-8fa4-dff5549bca97"). InnerVolumeSpecName "lock". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.063051 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "swift") pod "ddeb279d-2025-4326-8e65-865d310f7c0a" (UID: "ddeb279d-2025-4326-8e65-865d310f7c0a"). InnerVolumeSpecName "local-storage06-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.063132 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ddeb279d-2025-4326-8e65-865d310f7c0a-cache" (OuterVolumeSpecName: "cache") pod "ddeb279d-2025-4326-8e65-865d310f7c0a" (UID: "ddeb279d-2025-4326-8e65-865d310f7c0a"). InnerVolumeSpecName "cache". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.063998 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/39385d87-5895-47b3-8fa4-dff5549bca97-kube-api-access-llvjn" (OuterVolumeSpecName: "kube-api-access-llvjn") pod "39385d87-5895-47b3-8fa4-dff5549bca97" (UID: "39385d87-5895-47b3-8fa4-dff5549bca97"). InnerVolumeSpecName "kube-api-access-llvjn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.064794 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ddeb279d-2025-4326-8e65-865d310f7c0a-kube-api-access-qqqqf" (OuterVolumeSpecName: "kube-api-access-qqqqf") pod "ddeb279d-2025-4326-8e65-865d310f7c0a" (UID: "ddeb279d-2025-4326-8e65-865d310f7c0a"). InnerVolumeSpecName "kube-api-access-qqqqf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.064822 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/39385d87-5895-47b3-8fa4-dff5549bca97-cache" (OuterVolumeSpecName: "cache") pod "39385d87-5895-47b3-8fa4-dff5549bca97" (UID: "39385d87-5895-47b3-8fa4-dff5549bca97"). InnerVolumeSpecName "cache". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.065055 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage11-crc" (OuterVolumeSpecName: "swift") pod "39385d87-5895-47b3-8fa4-dff5549bca97" (UID: "39385d87-5895-47b3-8fa4-dff5549bca97"). InnerVolumeSpecName "local-storage11-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.066365 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ddeb279d-2025-4326-8e65-865d310f7c0a-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "ddeb279d-2025-4326-8e65-865d310f7c0a" (UID: "ddeb279d-2025-4326-8e65-865d310f7c0a"). InnerVolumeSpecName "etc-swift". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.066620 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/39385d87-5895-47b3-8fa4-dff5549bca97-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "39385d87-5895-47b3-8fa4-dff5549bca97" (UID: "39385d87-5895-47b3-8fa4-dff5549bca97"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.071442 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ddeb279d-2025-4326-8e65-865d310f7c0a-lock" (OuterVolumeSpecName: "lock") pod "ddeb279d-2025-4326-8e65-865d310f7c0a" (UID: "ddeb279d-2025-4326-8e65-865d310f7c0a"). InnerVolumeSpecName "lock". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.132154 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-storage-2" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.132996 4753 scope.go:117] "RemoveContainer" containerID="a60f6b51faf65756a3c0471962b2bdf525b7e3029c0357dd153d2da4daa57b8a" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.149298 4753 scope.go:117] "RemoveContainer" containerID="13d565afdd53734d519d75262cc241dad905071cb7825483fd615878fa346029" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.154564 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc-cache\") pod \"32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc\" (UID: \"32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc\") " Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.154675 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc-etc-swift\") pod \"32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc\" (UID: \"32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc\") " Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.154741 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z446w\" (UniqueName: \"kubernetes.io/projected/32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc-kube-api-access-z446w\") pod \"32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc\" (UID: \"32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc\") " Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.154770 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swift\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc\" (UID: \"32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc\") " Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.154828 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc-lock\") pod \"32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc\" (UID: \"32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc\") " Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.155203 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qqqqf\" (UniqueName: \"kubernetes.io/projected/ddeb279d-2025-4326-8e65-865d310f7c0a-kube-api-access-qqqqf\") on node \"crc\" DevicePath \"\"" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.155215 4753 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" 
(UniqueName: \"kubernetes.io/projected/ddeb279d-2025-4326-8e65-865d310f7c0a-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.155372 4753 reconciler_common.go:293] "Volume detached for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/39385d87-5895-47b3-8fa4-dff5549bca97-cache\") on node \"crc\" DevicePath \"\"" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.155384 4753 reconciler_common.go:293] "Volume detached for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/39385d87-5895-47b3-8fa4-dff5549bca97-lock\") on node \"crc\" DevicePath \"\"" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.155393 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-llvjn\" (UniqueName: \"kubernetes.io/projected/39385d87-5895-47b3-8fa4-dff5549bca97-kube-api-access-llvjn\") on node \"crc\" DevicePath \"\"" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.155402 4753 reconciler_common.go:293] "Volume detached for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/ddeb279d-2025-4326-8e65-865d310f7c0a-cache\") on node \"crc\" DevicePath \"\"" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.155409 4753 reconciler_common.go:293] "Volume detached for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/ddeb279d-2025-4326-8e65-865d310f7c0a-lock\") on node \"crc\" DevicePath \"\"" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.155448 4753 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" " Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.155463 4753 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" " Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.155472 4753 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/39385d87-5895-47b3-8fa4-dff5549bca97-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.156458 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc-cache" (OuterVolumeSpecName: "cache") pod "32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" (UID: "32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc"). InnerVolumeSpecName "cache". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.157914 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc-lock" (OuterVolumeSpecName: "lock") pod "32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" (UID: "32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc"). InnerVolumeSpecName "lock". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.160251 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc-kube-api-access-z446w" (OuterVolumeSpecName: "kube-api-access-z446w") pod "32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" (UID: "32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc"). InnerVolumeSpecName "kube-api-access-z446w". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.162355 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "swift") pod "32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" (UID: "32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc"). InnerVolumeSpecName "local-storage04-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.165726 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" (UID: "32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.189805 4753 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage11-crc" (UniqueName: "kubernetes.io/local-volume/local-storage11-crc") on node "crc" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.196611 4753 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.219890 4753 scope.go:117] "RemoveContainer" containerID="74e579bb81dd2d0a90b87f4e8962a16fb2e6352c7384af62591ce4ac200cc268" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.235995 4753 scope.go:117] "RemoveContainer" containerID="7a17416ceba41ee4beed6c5cd6e769268f0fb17d13e4cf402b2974089c52b3c5" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.250822 4753 scope.go:117] "RemoveContainer" containerID="78e57805ad2975127bfcf29509b23f4d1294dcfd0848150a2111e6b61c4ddfba" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.252398 4753 patch_prober.go:28] interesting pod/machine-config-daemon-7c24x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.252468 4753 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.256802 4753 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.256826 4753 reconciler_common.go:293] "Volume detached for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" DevicePath \"\"" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.256838 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z446w\" (UniqueName: \"kubernetes.io/projected/32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc-kube-api-access-z446w\") on node \"crc\" DevicePath \"\"" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.256869 4753 reconciler_common.go:286] "operationExecutor.UnmountDevice 
started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" " Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.256879 4753 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\"" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.256887 4753 reconciler_common.go:293] "Volume detached for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc-lock\") on node \"crc\" DevicePath \"\"" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.256895 4753 reconciler_common.go:293] "Volume detached for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc-cache\") on node \"crc\" DevicePath \"\"" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.272897 4753 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.275394 4753 scope.go:117] "RemoveContainer" containerID="e35b583e269bd87ef2536a7423d7e23d108405a6dac8032ce67f1432845ac9ee" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.293367 4753 scope.go:117] "RemoveContainer" containerID="39cbef45410dc33c3ccd280dd8dbd93028f01d71d87119a2009d38d2fab5647f" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.318484 4753 scope.go:117] "RemoveContainer" containerID="c2c67d359cc4c35fc1b93d25f31dd1acbe50d819456e7ba45edbca5cc66b76ad" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.337550 4753 scope.go:117] "RemoveContainer" containerID="ab8ab1b0296c8d63c94fe76b9be4dff85e2c3d3c5e119f4c61a4e952a3eacb57" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.351567 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/swift-storage-1"] Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.358713 4753 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\"" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.367135 4753 scope.go:117] "RemoveContainer" containerID="c02e393358bd7fc72cbcc938357a001d8e45a3614c97f8c21ef2b425a11ea56c" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.367977 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/swift-storage-1"] Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.373971 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/swift-storage-0"] Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.381246 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/swift-storage-0"] Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.405078 4753 scope.go:117] "RemoveContainer" containerID="8066888e8c4974033e42ad712726fb89b198041760f9941ccf400414fe67f7c4" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.431315 4753 scope.go:117] "RemoveContainer" containerID="62808980bc02e825395831219271d7565dbce4f504194b0b1d24f3ec68d29c7a" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.445863 4753 scope.go:117] "RemoveContainer" containerID="e2f1954c5859377fa2807abc43754082b700caff19ed4069e8ed7c2b84149b60" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.461736 4753 scope.go:117] "RemoveContainer" 
containerID="8e356a88be616efe8f74f65c56281ee6e48af8b76b2a36f9cf9de679f1c7b6b8" Jan 29 12:28:59 crc kubenswrapper[4753]: E0129 12:28:59.462670 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8e356a88be616efe8f74f65c56281ee6e48af8b76b2a36f9cf9de679f1c7b6b8\": container with ID starting with 8e356a88be616efe8f74f65c56281ee6e48af8b76b2a36f9cf9de679f1c7b6b8 not found: ID does not exist" containerID="8e356a88be616efe8f74f65c56281ee6e48af8b76b2a36f9cf9de679f1c7b6b8" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.463725 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8e356a88be616efe8f74f65c56281ee6e48af8b76b2a36f9cf9de679f1c7b6b8"} err="failed to get container status \"8e356a88be616efe8f74f65c56281ee6e48af8b76b2a36f9cf9de679f1c7b6b8\": rpc error: code = NotFound desc = could not find container \"8e356a88be616efe8f74f65c56281ee6e48af8b76b2a36f9cf9de679f1c7b6b8\": container with ID starting with 8e356a88be616efe8f74f65c56281ee6e48af8b76b2a36f9cf9de679f1c7b6b8 not found: ID does not exist" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.463897 4753 scope.go:117] "RemoveContainer" containerID="40e6b5dc9cea06540198cf963e5c70e91ace9da403911908c871f752fe076bc2" Jan 29 12:28:59 crc kubenswrapper[4753]: E0129 12:28:59.465106 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"40e6b5dc9cea06540198cf963e5c70e91ace9da403911908c871f752fe076bc2\": container with ID starting with 40e6b5dc9cea06540198cf963e5c70e91ace9da403911908c871f752fe076bc2 not found: ID does not exist" containerID="40e6b5dc9cea06540198cf963e5c70e91ace9da403911908c871f752fe076bc2" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.465143 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"40e6b5dc9cea06540198cf963e5c70e91ace9da403911908c871f752fe076bc2"} err="failed to get container status \"40e6b5dc9cea06540198cf963e5c70e91ace9da403911908c871f752fe076bc2\": rpc error: code = NotFound desc = could not find container \"40e6b5dc9cea06540198cf963e5c70e91ace9da403911908c871f752fe076bc2\": container with ID starting with 40e6b5dc9cea06540198cf963e5c70e91ace9da403911908c871f752fe076bc2 not found: ID does not exist" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.465161 4753 scope.go:117] "RemoveContainer" containerID="a60f6b51faf65756a3c0471962b2bdf525b7e3029c0357dd153d2da4daa57b8a" Jan 29 12:28:59 crc kubenswrapper[4753]: E0129 12:28:59.465668 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a60f6b51faf65756a3c0471962b2bdf525b7e3029c0357dd153d2da4daa57b8a\": container with ID starting with a60f6b51faf65756a3c0471962b2bdf525b7e3029c0357dd153d2da4daa57b8a not found: ID does not exist" containerID="a60f6b51faf65756a3c0471962b2bdf525b7e3029c0357dd153d2da4daa57b8a" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.465797 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a60f6b51faf65756a3c0471962b2bdf525b7e3029c0357dd153d2da4daa57b8a"} err="failed to get container status \"a60f6b51faf65756a3c0471962b2bdf525b7e3029c0357dd153d2da4daa57b8a\": rpc error: code = NotFound desc = could not find container \"a60f6b51faf65756a3c0471962b2bdf525b7e3029c0357dd153d2da4daa57b8a\": container with ID starting with 
a60f6b51faf65756a3c0471962b2bdf525b7e3029c0357dd153d2da4daa57b8a not found: ID does not exist" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.465901 4753 scope.go:117] "RemoveContainer" containerID="13d565afdd53734d519d75262cc241dad905071cb7825483fd615878fa346029" Jan 29 12:28:59 crc kubenswrapper[4753]: E0129 12:28:59.466385 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"13d565afdd53734d519d75262cc241dad905071cb7825483fd615878fa346029\": container with ID starting with 13d565afdd53734d519d75262cc241dad905071cb7825483fd615878fa346029 not found: ID does not exist" containerID="13d565afdd53734d519d75262cc241dad905071cb7825483fd615878fa346029" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.466417 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"13d565afdd53734d519d75262cc241dad905071cb7825483fd615878fa346029"} err="failed to get container status \"13d565afdd53734d519d75262cc241dad905071cb7825483fd615878fa346029\": rpc error: code = NotFound desc = could not find container \"13d565afdd53734d519d75262cc241dad905071cb7825483fd615878fa346029\": container with ID starting with 13d565afdd53734d519d75262cc241dad905071cb7825483fd615878fa346029 not found: ID does not exist" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.466435 4753 scope.go:117] "RemoveContainer" containerID="74e579bb81dd2d0a90b87f4e8962a16fb2e6352c7384af62591ce4ac200cc268" Jan 29 12:28:59 crc kubenswrapper[4753]: E0129 12:28:59.467131 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"74e579bb81dd2d0a90b87f4e8962a16fb2e6352c7384af62591ce4ac200cc268\": container with ID starting with 74e579bb81dd2d0a90b87f4e8962a16fb2e6352c7384af62591ce4ac200cc268 not found: ID does not exist" containerID="74e579bb81dd2d0a90b87f4e8962a16fb2e6352c7384af62591ce4ac200cc268" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.467384 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"74e579bb81dd2d0a90b87f4e8962a16fb2e6352c7384af62591ce4ac200cc268"} err="failed to get container status \"74e579bb81dd2d0a90b87f4e8962a16fb2e6352c7384af62591ce4ac200cc268\": rpc error: code = NotFound desc = could not find container \"74e579bb81dd2d0a90b87f4e8962a16fb2e6352c7384af62591ce4ac200cc268\": container with ID starting with 74e579bb81dd2d0a90b87f4e8962a16fb2e6352c7384af62591ce4ac200cc268 not found: ID does not exist" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.470386 4753 scope.go:117] "RemoveContainer" containerID="7a17416ceba41ee4beed6c5cd6e769268f0fb17d13e4cf402b2974089c52b3c5" Jan 29 12:28:59 crc kubenswrapper[4753]: E0129 12:28:59.471015 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7a17416ceba41ee4beed6c5cd6e769268f0fb17d13e4cf402b2974089c52b3c5\": container with ID starting with 7a17416ceba41ee4beed6c5cd6e769268f0fb17d13e4cf402b2974089c52b3c5 not found: ID does not exist" containerID="7a17416ceba41ee4beed6c5cd6e769268f0fb17d13e4cf402b2974089c52b3c5" Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.471052 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7a17416ceba41ee4beed6c5cd6e769268f0fb17d13e4cf402b2974089c52b3c5"} err="failed to get container status \"7a17416ceba41ee4beed6c5cd6e769268f0fb17d13e4cf402b2974089c52b3c5\": rpc 
error: code = NotFound desc = could not find container \"7a17416ceba41ee4beed6c5cd6e769268f0fb17d13e4cf402b2974089c52b3c5\": container with ID starting with 7a17416ceba41ee4beed6c5cd6e769268f0fb17d13e4cf402b2974089c52b3c5 not found: ID does not exist"
Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.471071 4753 scope.go:117] "RemoveContainer" containerID="78e57805ad2975127bfcf29509b23f4d1294dcfd0848150a2111e6b61c4ddfba"
Jan 29 12:28:59 crc kubenswrapper[4753]: E0129 12:28:59.471339 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"78e57805ad2975127bfcf29509b23f4d1294dcfd0848150a2111e6b61c4ddfba\": container with ID starting with 78e57805ad2975127bfcf29509b23f4d1294dcfd0848150a2111e6b61c4ddfba not found: ID does not exist" containerID="78e57805ad2975127bfcf29509b23f4d1294dcfd0848150a2111e6b61c4ddfba"
Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.471456 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"78e57805ad2975127bfcf29509b23f4d1294dcfd0848150a2111e6b61c4ddfba"} err="failed to get container status \"78e57805ad2975127bfcf29509b23f4d1294dcfd0848150a2111e6b61c4ddfba\": rpc error: code = NotFound desc = could not find container \"78e57805ad2975127bfcf29509b23f4d1294dcfd0848150a2111e6b61c4ddfba\": container with ID starting with 78e57805ad2975127bfcf29509b23f4d1294dcfd0848150a2111e6b61c4ddfba not found: ID does not exist"
Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.471551 4753 scope.go:117] "RemoveContainer" containerID="e35b583e269bd87ef2536a7423d7e23d108405a6dac8032ce67f1432845ac9ee"
Jan 29 12:28:59 crc kubenswrapper[4753]: E0129 12:28:59.471868 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e35b583e269bd87ef2536a7423d7e23d108405a6dac8032ce67f1432845ac9ee\": container with ID starting with e35b583e269bd87ef2536a7423d7e23d108405a6dac8032ce67f1432845ac9ee not found: ID does not exist" containerID="e35b583e269bd87ef2536a7423d7e23d108405a6dac8032ce67f1432845ac9ee"
Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.471895 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e35b583e269bd87ef2536a7423d7e23d108405a6dac8032ce67f1432845ac9ee"} err="failed to get container status \"e35b583e269bd87ef2536a7423d7e23d108405a6dac8032ce67f1432845ac9ee\": rpc error: code = NotFound desc = could not find container \"e35b583e269bd87ef2536a7423d7e23d108405a6dac8032ce67f1432845ac9ee\": container with ID starting with e35b583e269bd87ef2536a7423d7e23d108405a6dac8032ce67f1432845ac9ee not found: ID does not exist"
Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.471918 4753 scope.go:117] "RemoveContainer" containerID="39cbef45410dc33c3ccd280dd8dbd93028f01d71d87119a2009d38d2fab5647f"
Jan 29 12:28:59 crc kubenswrapper[4753]: E0129 12:28:59.472248 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"39cbef45410dc33c3ccd280dd8dbd93028f01d71d87119a2009d38d2fab5647f\": container with ID starting with 39cbef45410dc33c3ccd280dd8dbd93028f01d71d87119a2009d38d2fab5647f not found: ID does not exist" containerID="39cbef45410dc33c3ccd280dd8dbd93028f01d71d87119a2009d38d2fab5647f"
Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.472354 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"39cbef45410dc33c3ccd280dd8dbd93028f01d71d87119a2009d38d2fab5647f"} err="failed to get container status \"39cbef45410dc33c3ccd280dd8dbd93028f01d71d87119a2009d38d2fab5647f\": rpc error: code = NotFound desc = could not find container \"39cbef45410dc33c3ccd280dd8dbd93028f01d71d87119a2009d38d2fab5647f\": container with ID starting with 39cbef45410dc33c3ccd280dd8dbd93028f01d71d87119a2009d38d2fab5647f not found: ID does not exist"
Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.472458 4753 scope.go:117] "RemoveContainer" containerID="c2c67d359cc4c35fc1b93d25f31dd1acbe50d819456e7ba45edbca5cc66b76ad"
Jan 29 12:28:59 crc kubenswrapper[4753]: E0129 12:28:59.472855 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c2c67d359cc4c35fc1b93d25f31dd1acbe50d819456e7ba45edbca5cc66b76ad\": container with ID starting with c2c67d359cc4c35fc1b93d25f31dd1acbe50d819456e7ba45edbca5cc66b76ad not found: ID does not exist" containerID="c2c67d359cc4c35fc1b93d25f31dd1acbe50d819456e7ba45edbca5cc66b76ad"
Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.472878 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c2c67d359cc4c35fc1b93d25f31dd1acbe50d819456e7ba45edbca5cc66b76ad"} err="failed to get container status \"c2c67d359cc4c35fc1b93d25f31dd1acbe50d819456e7ba45edbca5cc66b76ad\": rpc error: code = NotFound desc = could not find container \"c2c67d359cc4c35fc1b93d25f31dd1acbe50d819456e7ba45edbca5cc66b76ad\": container with ID starting with c2c67d359cc4c35fc1b93d25f31dd1acbe50d819456e7ba45edbca5cc66b76ad not found: ID does not exist"
Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.472893 4753 scope.go:117] "RemoveContainer" containerID="ab8ab1b0296c8d63c94fe76b9be4dff85e2c3d3c5e119f4c61a4e952a3eacb57"
Jan 29 12:28:59 crc kubenswrapper[4753]: E0129 12:28:59.473128 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ab8ab1b0296c8d63c94fe76b9be4dff85e2c3d3c5e119f4c61a4e952a3eacb57\": container with ID starting with ab8ab1b0296c8d63c94fe76b9be4dff85e2c3d3c5e119f4c61a4e952a3eacb57 not found: ID does not exist" containerID="ab8ab1b0296c8d63c94fe76b9be4dff85e2c3d3c5e119f4c61a4e952a3eacb57"
Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.473160 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ab8ab1b0296c8d63c94fe76b9be4dff85e2c3d3c5e119f4c61a4e952a3eacb57"} err="failed to get container status \"ab8ab1b0296c8d63c94fe76b9be4dff85e2c3d3c5e119f4c61a4e952a3eacb57\": rpc error: code = NotFound desc = could not find container \"ab8ab1b0296c8d63c94fe76b9be4dff85e2c3d3c5e119f4c61a4e952a3eacb57\": container with ID starting with ab8ab1b0296c8d63c94fe76b9be4dff85e2c3d3c5e119f4c61a4e952a3eacb57 not found: ID does not exist"
Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.473187 4753 scope.go:117] "RemoveContainer" containerID="c02e393358bd7fc72cbcc938357a001d8e45a3614c97f8c21ef2b425a11ea56c"
Jan 29 12:28:59 crc kubenswrapper[4753]: E0129 12:28:59.473514 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c02e393358bd7fc72cbcc938357a001d8e45a3614c97f8c21ef2b425a11ea56c\": container with ID starting with c02e393358bd7fc72cbcc938357a001d8e45a3614c97f8c21ef2b425a11ea56c not found: ID does not exist" containerID="c02e393358bd7fc72cbcc938357a001d8e45a3614c97f8c21ef2b425a11ea56c"
Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.473617 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c02e393358bd7fc72cbcc938357a001d8e45a3614c97f8c21ef2b425a11ea56c"} err="failed to get container status \"c02e393358bd7fc72cbcc938357a001d8e45a3614c97f8c21ef2b425a11ea56c\": rpc error: code = NotFound desc = could not find container \"c02e393358bd7fc72cbcc938357a001d8e45a3614c97f8c21ef2b425a11ea56c\": container with ID starting with c02e393358bd7fc72cbcc938357a001d8e45a3614c97f8c21ef2b425a11ea56c not found: ID does not exist"
Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.473709 4753 scope.go:117] "RemoveContainer" containerID="8066888e8c4974033e42ad712726fb89b198041760f9941ccf400414fe67f7c4"
Jan 29 12:28:59 crc kubenswrapper[4753]: E0129 12:28:59.474034 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8066888e8c4974033e42ad712726fb89b198041760f9941ccf400414fe67f7c4\": container with ID starting with 8066888e8c4974033e42ad712726fb89b198041760f9941ccf400414fe67f7c4 not found: ID does not exist" containerID="8066888e8c4974033e42ad712726fb89b198041760f9941ccf400414fe67f7c4"
Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.474135 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8066888e8c4974033e42ad712726fb89b198041760f9941ccf400414fe67f7c4"} err="failed to get container status \"8066888e8c4974033e42ad712726fb89b198041760f9941ccf400414fe67f7c4\": rpc error: code = NotFound desc = could not find container \"8066888e8c4974033e42ad712726fb89b198041760f9941ccf400414fe67f7c4\": container with ID starting with 8066888e8c4974033e42ad712726fb89b198041760f9941ccf400414fe67f7c4 not found: ID does not exist"
Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.474247 4753 scope.go:117] "RemoveContainer" containerID="62808980bc02e825395831219271d7565dbce4f504194b0b1d24f3ec68d29c7a"
Jan 29 12:28:59 crc kubenswrapper[4753]: E0129 12:28:59.474631 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"62808980bc02e825395831219271d7565dbce4f504194b0b1d24f3ec68d29c7a\": container with ID starting with 62808980bc02e825395831219271d7565dbce4f504194b0b1d24f3ec68d29c7a not found: ID does not exist" containerID="62808980bc02e825395831219271d7565dbce4f504194b0b1d24f3ec68d29c7a"
Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.474659 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"62808980bc02e825395831219271d7565dbce4f504194b0b1d24f3ec68d29c7a"} err="failed to get container status \"62808980bc02e825395831219271d7565dbce4f504194b0b1d24f3ec68d29c7a\": rpc error: code = NotFound desc = could not find container \"62808980bc02e825395831219271d7565dbce4f504194b0b1d24f3ec68d29c7a\": container with ID starting with 62808980bc02e825395831219271d7565dbce4f504194b0b1d24f3ec68d29c7a not found: ID does not exist"
Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.474679 4753 scope.go:117] "RemoveContainer" containerID="e2f1954c5859377fa2807abc43754082b700caff19ed4069e8ed7c2b84149b60"
Jan 29 12:28:59 crc kubenswrapper[4753]: E0129 12:28:59.474928 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e2f1954c5859377fa2807abc43754082b700caff19ed4069e8ed7c2b84149b60\": container with ID starting with e2f1954c5859377fa2807abc43754082b700caff19ed4069e8ed7c2b84149b60 not found: ID does not exist" containerID="e2f1954c5859377fa2807abc43754082b700caff19ed4069e8ed7c2b84149b60"
Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.474956 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e2f1954c5859377fa2807abc43754082b700caff19ed4069e8ed7c2b84149b60"} err="failed to get container status \"e2f1954c5859377fa2807abc43754082b700caff19ed4069e8ed7c2b84149b60\": rpc error: code = NotFound desc = could not find container \"e2f1954c5859377fa2807abc43754082b700caff19ed4069e8ed7c2b84149b60\": container with ID starting with e2f1954c5859377fa2807abc43754082b700caff19ed4069e8ed7c2b84149b60 not found: ID does not exist"
Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.474973 4753 scope.go:117] "RemoveContainer" containerID="e7da666c1e8d2b0689ffa24259c62b01b913567e00d5b3ed3b19dd77e0c6c533"
Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.493616 4753 scope.go:117] "RemoveContainer" containerID="955915dd25543e1efdb915a3879096fba3d38186fe0b1844ff742a04165dbcab"
Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.509508 4753 scope.go:117] "RemoveContainer" containerID="60fba7ddfb790e3ab32a5fcb0f5930bccf040407ee1755fd70b5ef489d123112"
Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.529904 4753 scope.go:117] "RemoveContainer" containerID="896609a3bbf20ce8ed0c2121726b125b6eb828e48157cc650e81099744c07e6a"
Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.573731 4753 scope.go:117] "RemoveContainer" containerID="b5dbc9e432cab22467e73f936a4bd9ed835e89758f8d7b9248aa062527139167"
Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.670750 4753 scope.go:117] "RemoveContainer" containerID="f600c9d18c7bef57d4cdc5e5e4f2ef9ad5b634df5b0c23fc5d34c56cbc0abc7c"
Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.688974 4753 scope.go:117] "RemoveContainer" containerID="f512c2300a33e2cb5b259c3418f458b2d6f86634ccce39b9adee884ae693a404"
Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.708059 4753 scope.go:117] "RemoveContainer" containerID="737616607dae86265208ff6f145edb0ce8b20dba96eda0daf445337b0fef1935"
Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.727792 4753 scope.go:117] "RemoveContainer" containerID="75aca0fed29877eb2ade05283d8faf4a4c3f794d44c85614b217f1bf2ae5b2ce"
Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.747596 4753 scope.go:117] "RemoveContainer" containerID="76c9500d039ba3a69ffb165677dd2e12bdadadeb261817ebd10496ce731615ce"
Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.764764 4753 scope.go:117] "RemoveContainer" containerID="167d4dbddcf9e32fccd34fe86b69996a5f6c260ba7e291f34f64d24b772b8ac8"
Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.781485 4753 scope.go:117] "RemoveContainer" containerID="984e7ec23fbb1ea4d65de4f32027ca929ec222f93bf54d3a02596d29a4717385"
Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.802429 4753 scope.go:117] "RemoveContainer" containerID="063062d207c79c3b7f996082ef0d8f966b55a5285627a39041e569d21584423f"
Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.824595 4753 scope.go:117] "RemoveContainer" containerID="af0f9b6fe749f96595fa413d19eb4d05d225aa607b0bdca7f6249d9c3c82af97"
Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.839211 4753 scope.go:117] "RemoveContainer" containerID="3726b25caf518cbf35e2f777e83e1ecfee7780f3edbad81b396ea283f5da3a4a"
Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.853155 4753 scope.go:117] "RemoveContainer" containerID="e7da666c1e8d2b0689ffa24259c62b01b913567e00d5b3ed3b19dd77e0c6c533"
Jan 29 12:28:59 crc kubenswrapper[4753]: E0129 12:28:59.853637 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e7da666c1e8d2b0689ffa24259c62b01b913567e00d5b3ed3b19dd77e0c6c533\": container with ID starting with e7da666c1e8d2b0689ffa24259c62b01b913567e00d5b3ed3b19dd77e0c6c533 not found: ID does not exist" containerID="e7da666c1e8d2b0689ffa24259c62b01b913567e00d5b3ed3b19dd77e0c6c533"
Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.853713 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e7da666c1e8d2b0689ffa24259c62b01b913567e00d5b3ed3b19dd77e0c6c533"} err="failed to get container status \"e7da666c1e8d2b0689ffa24259c62b01b913567e00d5b3ed3b19dd77e0c6c533\": rpc error: code = NotFound desc = could not find container \"e7da666c1e8d2b0689ffa24259c62b01b913567e00d5b3ed3b19dd77e0c6c533\": container with ID starting with e7da666c1e8d2b0689ffa24259c62b01b913567e00d5b3ed3b19dd77e0c6c533 not found: ID does not exist"
Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.853754 4753 scope.go:117] "RemoveContainer" containerID="955915dd25543e1efdb915a3879096fba3d38186fe0b1844ff742a04165dbcab"
Jan 29 12:28:59 crc kubenswrapper[4753]: E0129 12:28:59.854109 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"955915dd25543e1efdb915a3879096fba3d38186fe0b1844ff742a04165dbcab\": container with ID starting with 955915dd25543e1efdb915a3879096fba3d38186fe0b1844ff742a04165dbcab not found: ID does not exist" containerID="955915dd25543e1efdb915a3879096fba3d38186fe0b1844ff742a04165dbcab"
Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.854161 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"955915dd25543e1efdb915a3879096fba3d38186fe0b1844ff742a04165dbcab"} err="failed to get container status \"955915dd25543e1efdb915a3879096fba3d38186fe0b1844ff742a04165dbcab\": rpc error: code = NotFound desc = could not find container \"955915dd25543e1efdb915a3879096fba3d38186fe0b1844ff742a04165dbcab\": container with ID starting with 955915dd25543e1efdb915a3879096fba3d38186fe0b1844ff742a04165dbcab not found: ID does not exist"
Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.854192 4753 scope.go:117] "RemoveContainer" containerID="60fba7ddfb790e3ab32a5fcb0f5930bccf040407ee1755fd70b5ef489d123112"
Jan 29 12:28:59 crc kubenswrapper[4753]: E0129 12:28:59.854542 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"60fba7ddfb790e3ab32a5fcb0f5930bccf040407ee1755fd70b5ef489d123112\": container with ID starting with 60fba7ddfb790e3ab32a5fcb0f5930bccf040407ee1755fd70b5ef489d123112 not found: ID does not exist" containerID="60fba7ddfb790e3ab32a5fcb0f5930bccf040407ee1755fd70b5ef489d123112"
Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.854571 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"60fba7ddfb790e3ab32a5fcb0f5930bccf040407ee1755fd70b5ef489d123112"} err="failed to get container status \"60fba7ddfb790e3ab32a5fcb0f5930bccf040407ee1755fd70b5ef489d123112\": rpc error: code = NotFound desc = could not find container \"60fba7ddfb790e3ab32a5fcb0f5930bccf040407ee1755fd70b5ef489d123112\": container with ID starting with 60fba7ddfb790e3ab32a5fcb0f5930bccf040407ee1755fd70b5ef489d123112 not found: ID does not exist"
Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.854589 4753 scope.go:117] "RemoveContainer" containerID="896609a3bbf20ce8ed0c2121726b125b6eb828e48157cc650e81099744c07e6a"
Jan 29 12:28:59 crc kubenswrapper[4753]: E0129 12:28:59.854849 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"896609a3bbf20ce8ed0c2121726b125b6eb828e48157cc650e81099744c07e6a\": container with ID starting with 896609a3bbf20ce8ed0c2121726b125b6eb828e48157cc650e81099744c07e6a not found: ID does not exist" containerID="896609a3bbf20ce8ed0c2121726b125b6eb828e48157cc650e81099744c07e6a"
Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.854880 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"896609a3bbf20ce8ed0c2121726b125b6eb828e48157cc650e81099744c07e6a"} err="failed to get container status \"896609a3bbf20ce8ed0c2121726b125b6eb828e48157cc650e81099744c07e6a\": rpc error: code = NotFound desc = could not find container \"896609a3bbf20ce8ed0c2121726b125b6eb828e48157cc650e81099744c07e6a\": container with ID starting with 896609a3bbf20ce8ed0c2121726b125b6eb828e48157cc650e81099744c07e6a not found: ID does not exist"
Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.854905 4753 scope.go:117] "RemoveContainer" containerID="b5dbc9e432cab22467e73f936a4bd9ed835e89758f8d7b9248aa062527139167"
Jan 29 12:28:59 crc kubenswrapper[4753]: E0129 12:28:59.855149 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b5dbc9e432cab22467e73f936a4bd9ed835e89758f8d7b9248aa062527139167\": container with ID starting with b5dbc9e432cab22467e73f936a4bd9ed835e89758f8d7b9248aa062527139167 not found: ID does not exist" containerID="b5dbc9e432cab22467e73f936a4bd9ed835e89758f8d7b9248aa062527139167"
Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.855174 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b5dbc9e432cab22467e73f936a4bd9ed835e89758f8d7b9248aa062527139167"} err="failed to get container status \"b5dbc9e432cab22467e73f936a4bd9ed835e89758f8d7b9248aa062527139167\": rpc error: code = NotFound desc = could not find container \"b5dbc9e432cab22467e73f936a4bd9ed835e89758f8d7b9248aa062527139167\": container with ID starting with b5dbc9e432cab22467e73f936a4bd9ed835e89758f8d7b9248aa062527139167 not found: ID does not exist"
Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.855187 4753 scope.go:117] "RemoveContainer" containerID="f600c9d18c7bef57d4cdc5e5e4f2ef9ad5b634df5b0c23fc5d34c56cbc0abc7c"
Jan 29 12:28:59 crc kubenswrapper[4753]: E0129 12:28:59.855420 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f600c9d18c7bef57d4cdc5e5e4f2ef9ad5b634df5b0c23fc5d34c56cbc0abc7c\": container with ID starting with f600c9d18c7bef57d4cdc5e5e4f2ef9ad5b634df5b0c23fc5d34c56cbc0abc7c not found: ID does not exist" containerID="f600c9d18c7bef57d4cdc5e5e4f2ef9ad5b634df5b0c23fc5d34c56cbc0abc7c"
Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.855451 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f600c9d18c7bef57d4cdc5e5e4f2ef9ad5b634df5b0c23fc5d34c56cbc0abc7c"} err="failed to get container status \"f600c9d18c7bef57d4cdc5e5e4f2ef9ad5b634df5b0c23fc5d34c56cbc0abc7c\": rpc error: code = NotFound desc = could not find container \"f600c9d18c7bef57d4cdc5e5e4f2ef9ad5b634df5b0c23fc5d34c56cbc0abc7c\": container with ID starting with f600c9d18c7bef57d4cdc5e5e4f2ef9ad5b634df5b0c23fc5d34c56cbc0abc7c not found: ID does not exist"
Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.895851 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" path="/var/lib/kubelet/pods/39385d87-5895-47b3-8fa4-dff5549bca97/volumes"
Jan 29 12:28:59 crc kubenswrapper[4753]: I0129 12:28:59.898322 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" path="/var/lib/kubelet/pods/ddeb279d-2025-4326-8e65-865d310f7c0a/volumes"
Jan 29 12:29:00 crc kubenswrapper[4753]: I0129 12:29:00.036991 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc","Type":"ContainerDied","Data":"44898ac6a06f8bfc1c454ff65ed0567198704e01dd2cacf07c4f71d291cf462e"}
Jan 29 12:29:00 crc kubenswrapper[4753]: I0129 12:29:00.037041 4753 scope.go:117] "RemoveContainer" containerID="089bfd5f212edcb63517b6be2c9e93e85788d0a36322b33d159dc4276eeffa8c"
Jan 29 12:29:00 crc kubenswrapper[4753]: I0129 12:29:00.037270 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-storage-2"
Jan 29 12:29:00 crc kubenswrapper[4753]: I0129 12:29:00.065061 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/swift-storage-2"]
Jan 29 12:29:00 crc kubenswrapper[4753]: I0129 12:29:00.067697 4753 scope.go:117] "RemoveContainer" containerID="263c42f493bd95e422bf98dec8882fa3d3524e56d9ce4bc9f9ecf43ceb8c3a2e"
Jan 29 12:29:00 crc kubenswrapper[4753]: I0129 12:29:00.069406 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/swift-storage-2"]
Jan 29 12:29:00 crc kubenswrapper[4753]: I0129 12:29:00.091054 4753 scope.go:117] "RemoveContainer" containerID="e886a1fa21080f1c391ce8ba831cdb18c9181775550b92bbd69711a723a6c2ca"
Jan 29 12:29:00 crc kubenswrapper[4753]: I0129 12:29:00.105206 4753 scope.go:117] "RemoveContainer" containerID="ff839774eea656b737782f6cc51d5decbaa50e4fcaeed6ad6d8c0f31282f312e"
Jan 29 12:29:00 crc kubenswrapper[4753]: I0129 12:29:00.129497 4753 scope.go:117] "RemoveContainer" containerID="400a061c4bd0a357658de03c8e6bf769bb555b258f4dc874056c3bc75b8d5f62"
Jan 29 12:29:00 crc kubenswrapper[4753]: I0129 12:29:00.145139 4753 scope.go:117] "RemoveContainer" containerID="8c90ac4f21fbe6fd19be163628758ffd03ac6be361d6e346e128041b97d6eb05"
Jan 29 12:29:00 crc kubenswrapper[4753]: I0129 12:29:00.159826 4753 scope.go:117] "RemoveContainer" containerID="b601e419b6959550e2118567697a445d92d4c7c854603a5c05ad0a8bd8840b30"
Jan 29 12:29:00 crc kubenswrapper[4753]: I0129 12:29:00.180110 4753 scope.go:117] "RemoveContainer" containerID="ff3a6b0d2ffa6545d5a3788e7c7dea23a83af8af68f1a3dbb4abcd33d05b9901"
Jan 29 12:29:00 crc kubenswrapper[4753]: I0129 12:29:00.202916 4753 scope.go:117] "RemoveContainer" containerID="c61d771f239c1ddb78ae193005b7f28411347094488416d74c36b5a4e1af184e"
Jan 29 12:29:00 crc kubenswrapper[4753]: I0129 12:29:00.222035 4753 scope.go:117] "RemoveContainer" containerID="7205dfe02ab43d1f70f31ef2ec36df92f3b6ddfe4b58c079fa1bef08461d8f7d"
Jan 29 12:29:00 crc kubenswrapper[4753]: I0129 12:29:00.235868 4753 scope.go:117] "RemoveContainer" containerID="00c1eb87259c9f9ca790410d154830681a972b41be2f543d941b16c768d02c7a"
Jan 29 12:29:00 crc kubenswrapper[4753]: I0129 12:29:00.256676 4753 scope.go:117] "RemoveContainer" containerID="4be3d87956e4951b9b943798ec79dccb05d39a88a8d1c684b6af2f4d1d49dd8a"
Jan 29 12:29:00 crc kubenswrapper[4753]: I0129 12:29:00.280671 4753 scope.go:117] "RemoveContainer" containerID="0fdf7c43e207058700148694e8d0512172a5c08367d82685ee34cb21a7d777bb"
Jan 29 12:29:00 crc kubenswrapper[4753]: I0129 12:29:00.303536 4753 scope.go:117] "RemoveContainer" containerID="ffb8afe70efce4899f28e6a84a2da82ec2152af28d185ef2246910c3630e20af"
Jan 29 12:29:00 crc kubenswrapper[4753]: I0129 12:29:00.335468 4753 scope.go:117] "RemoveContainer" containerID="fe8f9ebcc1c86411e3bb093c67fb21d93a96fee55951ceee7e3131efeb068618"
Jan 29 12:29:01 crc kubenswrapper[4753]: I0129 12:29:01.898051 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" path="/var/lib/kubelet/pods/32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc/volumes"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.654366 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/swift-storage-0"]
Jan 29 12:29:02 crc kubenswrapper[4753]: E0129 12:29:02.654701 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="container-server"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.654714 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="container-server"
Jan 29 12:29:02 crc kubenswrapper[4753]: E0129 12:29:02.654727 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="object-replicator"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.654735 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="object-replicator"
Jan 29 12:29:02 crc kubenswrapper[4753]: E0129 12:29:02.654742 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="account-auditor"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.654750 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="account-auditor"
Jan 29 12:29:02 crc kubenswrapper[4753]: E0129 12:29:02.654759 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="container-replicator"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.654765 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="container-replicator"
Jan 29 12:29:02 crc kubenswrapper[4753]: E0129 12:29:02.654779 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="rsync"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.654787 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="rsync"
Jan 29 12:29:02 crc kubenswrapper[4753]: E0129 12:29:02.654796 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="rsync"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.654801 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="rsync"
Jan 29 12:29:02 crc kubenswrapper[4753]: E0129 12:29:02.654812 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="object-server"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.654817 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="object-server"
Jan 29 12:29:02 crc kubenswrapper[4753]: E0129 12:29:02.654826 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="object-server"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.654834 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="object-server"
Jan 29 12:29:02 crc kubenswrapper[4753]: E0129 12:29:02.654840 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="object-auditor"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.654847 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="object-auditor"
Jan 29 12:29:02 crc kubenswrapper[4753]: E0129 12:29:02.654865 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="container-updater"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.654871 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="container-updater"
Jan 29 12:29:02 crc kubenswrapper[4753]: E0129 12:29:02.654882 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="object-replicator"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.654889 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="object-replicator"
Jan 29 12:29:02 crc kubenswrapper[4753]: E0129 12:29:02.654900 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="container-server"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.654907 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="container-server"
Jan 29 12:29:02 crc kubenswrapper[4753]: E0129 12:29:02.654916 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="account-server"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.654922 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="account-server"
Jan 29 12:29:02 crc kubenswrapper[4753]: E0129 12:29:02.654932 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="object-auditor"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.654938 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="object-auditor"
Jan 29 12:29:02 crc kubenswrapper[4753]: E0129 12:29:02.654946 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="container-updater"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.654952 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="container-updater"
Jan 29 12:29:02 crc kubenswrapper[4753]: E0129 12:29:02.654962 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1279ea7f-350d-4657-8d79-d384e6974700" containerName="proxy-httpd"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.654968 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="1279ea7f-350d-4657-8d79-d384e6974700" containerName="proxy-httpd"
Jan 29 12:29:02 crc kubenswrapper[4753]: E0129 12:29:02.654976 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="swift-recon-cron"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.654982 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="swift-recon-cron"
Jan 29 12:29:02 crc kubenswrapper[4753]: E0129 12:29:02.654988 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="container-auditor"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.654994 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="container-auditor"
Jan 29 12:29:02 crc kubenswrapper[4753]: E0129 12:29:02.655006 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="object-expirer"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655014 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="object-expirer"
Jan 29 12:29:02 crc kubenswrapper[4753]: E0129 12:29:02.655027 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="container-updater"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655035 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="container-updater"
Jan 29 12:29:02 crc kubenswrapper[4753]: E0129 12:29:02.655047 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="container-replicator"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655054 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="container-replicator"
Jan 29 12:29:02 crc kubenswrapper[4753]: E0129 12:29:02.655062 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="account-auditor"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655070 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="account-auditor"
Jan 29 12:29:02 crc kubenswrapper[4753]: E0129 12:29:02.655082 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="container-replicator"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655088 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="container-replicator"
Jan 29 12:29:02 crc kubenswrapper[4753]: E0129 12:29:02.655099 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="account-server"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655106 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="account-server"
Jan 29 12:29:02 crc kubenswrapper[4753]: E0129 12:29:02.655117 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="account-server"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655124 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="account-server"
Jan 29 12:29:02 crc kubenswrapper[4753]: E0129 12:29:02.655138 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="object-replicator"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655145 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="object-replicator"
Jan 29 12:29:02 crc kubenswrapper[4753]: E0129 12:29:02.655155 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="object-server"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655162 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="object-server"
Jan 29 12:29:02 crc kubenswrapper[4753]: E0129 12:29:02.655173 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1279ea7f-350d-4657-8d79-d384e6974700" containerName="proxy-server"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655179 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="1279ea7f-350d-4657-8d79-d384e6974700" containerName="proxy-server"
Jan 29 12:29:02 crc kubenswrapper[4753]: E0129 12:29:02.655192 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="swift-recon-cron"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655199 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="swift-recon-cron"
Jan 29 12:29:02 crc kubenswrapper[4753]: E0129 12:29:02.655207 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="object-auditor"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655214 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="object-auditor"
Jan 29 12:29:02 crc kubenswrapper[4753]: E0129 12:29:02.655298 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="account-replicator"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655308 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="account-replicator"
Jan 29 12:29:02 crc kubenswrapper[4753]: E0129 12:29:02.655318 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="account-replicator"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655325 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="account-replicator"
Jan 29 12:29:02 crc kubenswrapper[4753]: E0129 12:29:02.655332 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="account-reaper"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655338 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="account-reaper"
Jan 29 12:29:02 crc kubenswrapper[4753]: E0129 12:29:02.655344 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="object-updater"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655352 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="object-updater"
Jan 29 12:29:02 crc kubenswrapper[4753]: E0129 12:29:02.655359 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="account-replicator"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655365 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="account-replicator"
Jan 29 12:29:02 crc kubenswrapper[4753]: E0129 12:29:02.655376 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="account-reaper"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655381 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="account-reaper"
Jan 29 12:29:02 crc kubenswrapper[4753]: E0129 12:29:02.655389 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="object-expirer"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655395 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="object-expirer"
Jan 29 12:29:02 crc kubenswrapper[4753]: E0129 12:29:02.655403 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="account-auditor"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655409 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="account-auditor"
Jan 29 12:29:02 crc kubenswrapper[4753]: E0129 12:29:02.655419 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="container-auditor"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655424 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="container-auditor"
Jan 29 12:29:02 crc kubenswrapper[4753]: E0129 12:29:02.655432 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="object-updater"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655437 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="object-updater"
Jan 29 12:29:02 crc kubenswrapper[4753]: E0129 12:29:02.655446 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="container-server"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655452 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="container-server"
Jan 29 12:29:02 crc kubenswrapper[4753]: E0129 12:29:02.655462 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="object-expirer"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655468 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="object-expirer"
Jan 29 12:29:02 crc kubenswrapper[4753]: E0129 12:29:02.655476 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="object-updater"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655481 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="object-updater"
Jan 29 12:29:02 crc kubenswrapper[4753]: E0129 12:29:02.655491 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="container-auditor"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655497 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="container-auditor"
Jan 29 12:29:02 crc kubenswrapper[4753]: E0129 12:29:02.655505 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="account-reaper"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655511 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="account-reaper"
Jan 29 12:29:02 crc kubenswrapper[4753]: E0129 12:29:02.655520 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf9cdf86-3873-41b5-be7c-2aeae3201b49" containerName="swift-ring-rebalance"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655526 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf9cdf86-3873-41b5-be7c-2aeae3201b49" containerName="swift-ring-rebalance"
Jan 29 12:29:02 crc kubenswrapper[4753]: E0129 12:29:02.655534 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="swift-recon-cron"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655539 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="swift-recon-cron"
Jan 29 12:29:02 crc kubenswrapper[4753]: E0129 12:29:02.655547 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="rsync"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655552 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="rsync"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655683 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="container-auditor"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655694 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="account-auditor"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655703 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="account-replicator"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655711 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="account-reaper"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655717 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="object-server"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655724 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="rsync"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655731 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="account-reaper"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655739 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="rsync"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655745 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="container-replicator"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655753 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="swift-recon-cron"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655761 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="1279ea7f-350d-4657-8d79-d384e6974700" containerName="proxy-httpd"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655771 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="account-auditor"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655779 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="object-server"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655786 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="object-updater"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655795 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="container-replicator"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655802 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="object-replicator"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655808 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="container-auditor"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655816 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="account-replicator"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655823 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="object-auditor"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655830 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="account-reaper"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655835 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="container-auditor"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655843 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="account-server"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655849 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="1279ea7f-350d-4657-8d79-d384e6974700" containerName="proxy-server"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655855 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="account-server"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655863 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="container-server"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655869 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="object-updater"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655878 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="object-expirer"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655886 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="account-auditor"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655893 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="object-updater"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655902 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="rsync"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655907 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="account-replicator"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655914 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="container-updater"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655922 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf9cdf86-3873-41b5-be7c-2aeae3201b49" containerName="swift-ring-rebalance"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655929 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="container-updater"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655937 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="object-server"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655947 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="object-replicator"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655954 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="object-auditor"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655960 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="container-server"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655967 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="object-expirer"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655973 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="object-expirer"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655980 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="swift-recon-cron"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655988 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="account-server"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.655996 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="container-server"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.656004 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="container-updater"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.656011 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="32ea34f5-5f12-4c38-8b0b-6ca91dfbb2dc" containerName="swift-recon-cron"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.656017 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="object-replicator"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.656024 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="39385d87-5895-47b3-8fa4-dff5549bca97" containerName="object-auditor"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.656041 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="ddeb279d-2025-4326-8e65-865d310f7c0a" containerName="container-replicator"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.660287 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-storage-0"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.665184 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"swift-kuttl-tests"/"swift-storage-config-data"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.665491 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"swift-kuttl-tests"/"swift-ring-files"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.665718 4753 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"swift-swift-dockercfg-6vlv4"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.666390 4753 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"swift-conf"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.719138 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-storage-0"]
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.936213 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/138a8739-c950-4fe4-9aaa-e232bdea3f7b-cache\") pod \"swift-storage-0\" (UID: \"138a8739-c950-4fe4-9aaa-e232bdea3f7b\") " pod="swift-kuttl-tests/swift-storage-0"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.936335 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/138a8739-c950-4fe4-9aaa-e232bdea3f7b-lock\") pod \"swift-storage-0\" (UID: \"138a8739-c950-4fe4-9aaa-e232bdea3f7b\") " pod="swift-kuttl-tests/swift-storage-0"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.936615 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/138a8739-c950-4fe4-9aaa-e232bdea3f7b-etc-swift\") pod \"swift-storage-0\" (UID: \"138a8739-c950-4fe4-9aaa-e232bdea3f7b\") " pod="swift-kuttl-tests/swift-storage-0"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.936686 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jlczr\" (UniqueName: \"kubernetes.io/projected/138a8739-c950-4fe4-9aaa-e232bdea3f7b-kube-api-access-jlczr\") pod \"swift-storage-0\" (UID: \"138a8739-c950-4fe4-9aaa-e232bdea3f7b\") " pod="swift-kuttl-tests/swift-storage-0"
Jan 29 12:29:02 crc kubenswrapper[4753]: I0129 12:29:02.936932 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"swift-storage-0\" (UID: \"138a8739-c950-4fe4-9aaa-e232bdea3f7b\") " pod="swift-kuttl-tests/swift-storage-0"
Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.025433 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-dlpgk"]
Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.026636 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-dlpgk"
Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.028818 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"swift-kuttl-tests"/"swift-ring-scripts"
Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.028983 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"swift-kuttl-tests"/"swift-ring-config-data"
Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.028881 4753 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"swift-proxy-config-data"
Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.038424 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"swift-storage-0\" (UID: \"138a8739-c950-4fe4-9aaa-e232bdea3f7b\") " pod="swift-kuttl-tests/swift-storage-0"
Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.038513 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/138a8739-c950-4fe4-9aaa-e232bdea3f7b-cache\") pod \"swift-storage-0\" (UID: \"138a8739-c950-4fe4-9aaa-e232bdea3f7b\") " pod="swift-kuttl-tests/swift-storage-0"
Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.038603 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/138a8739-c950-4fe4-9aaa-e232bdea3f7b-lock\") pod \"swift-storage-0\" (UID: \"138a8739-c950-4fe4-9aaa-e232bdea3f7b\") " pod="swift-kuttl-tests/swift-storage-0"
Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.038683 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/138a8739-c950-4fe4-9aaa-e232bdea3f7b-etc-swift\") pod \"swift-storage-0\" (UID: \"138a8739-c950-4fe4-9aaa-e232bdea3f7b\") " pod="swift-kuttl-tests/swift-storage-0"
Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.038713 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jlczr\" (UniqueName: \"kubernetes.io/projected/138a8739-c950-4fe4-9aaa-e232bdea3f7b-kube-api-access-jlczr\") pod \"swift-storage-0\" (UID: \"138a8739-c950-4fe4-9aaa-e232bdea3f7b\") " pod="swift-kuttl-tests/swift-storage-0"
Jan 29 12:29:03 crc kubenswrapper[4753]: E0129 12:29:03.038965 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found
Jan 29 12:29:03 crc kubenswrapper[4753]: E0129 12:29:03.039021 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-storage-0: configmap "swift-ring-files" not found
Jan 29 12:29:03 crc kubenswrapper[4753]: E0129 12:29:03.039135 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/138a8739-c950-4fe4-9aaa-e232bdea3f7b-etc-swift podName:138a8739-c950-4fe4-9aaa-e232bdea3f7b nodeName:}" failed. No retries permitted until 2026-01-29 12:29:03.539095077 +0000 UTC m=+1357.791176532 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/138a8739-c950-4fe4-9aaa-e232bdea3f7b-etc-swift") pod "swift-storage-0" (UID: "138a8739-c950-4fe4-9aaa-e232bdea3f7b") : configmap "swift-ring-files" not found
Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.039274 4753 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"swift-storage-0\" (UID: \"138a8739-c950-4fe4-9aaa-e232bdea3f7b\") device mount path \"/mnt/openstack/pv05\"" pod="swift-kuttl-tests/swift-storage-0"
Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.039344 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/138a8739-c950-4fe4-9aaa-e232bdea3f7b-cache\") pod \"swift-storage-0\" (UID: \"138a8739-c950-4fe4-9aaa-e232bdea3f7b\") " pod="swift-kuttl-tests/swift-storage-0"
Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.039382 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/138a8739-c950-4fe4-9aaa-e232bdea3f7b-lock\") pod \"swift-storage-0\" (UID: \"138a8739-c950-4fe4-9aaa-e232bdea3f7b\") " pod="swift-kuttl-tests/swift-storage-0"
Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.062293 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-dlpgk"]
Jan 29 12:29:03 crc kubenswrapper[4753]: E0129 12:29:03.063158 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[dispersionconf etc-swift kube-api-access-x5vwn ring-data-devices scripts swiftconf], unattached volumes=[], failed to process volumes=[]: context canceled" pod="swift-kuttl-tests/swift-ring-rebalance-dlpgk" podUID="37565540-2b9c-4657-bc00-b79793e082a0"
Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.068797 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"swift-storage-0\" (UID: \"138a8739-c950-4fe4-9aaa-e232bdea3f7b\") " pod="swift-kuttl-tests/swift-storage-0"
Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.072735 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-7494v"]
Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.073972 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-7494v"
Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.099595 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jlczr\" (UniqueName: \"kubernetes.io/projected/138a8739-c950-4fe4-9aaa-e232bdea3f7b-kube-api-access-jlczr\") pod \"swift-storage-0\" (UID: \"138a8739-c950-4fe4-9aaa-e232bdea3f7b\") " pod="swift-kuttl-tests/swift-storage-0"
Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.116011 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-7494v"]
Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.123278 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-dlpgk"]
Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.139665 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/37565540-2b9c-4657-bc00-b79793e082a0-swiftconf\") pod \"swift-ring-rebalance-dlpgk\" (UID: \"37565540-2b9c-4657-bc00-b79793e082a0\") " pod="swift-kuttl-tests/swift-ring-rebalance-dlpgk"
Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.139857 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x5vwn\" (UniqueName: \"kubernetes.io/projected/37565540-2b9c-4657-bc00-b79793e082a0-kube-api-access-x5vwn\") pod \"swift-ring-rebalance-dlpgk\" (UID: \"37565540-2b9c-4657-bc00-b79793e082a0\") " pod="swift-kuttl-tests/swift-ring-rebalance-dlpgk"
Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.139902 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/37565540-2b9c-4657-bc00-b79793e082a0-dispersionconf\") pod \"swift-ring-rebalance-dlpgk\" (UID: \"37565540-2b9c-4657-bc00-b79793e082a0\") " pod="swift-kuttl-tests/swift-ring-rebalance-dlpgk"
Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.139928 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/37565540-2b9c-4657-bc00-b79793e082a0-etc-swift\") pod \"swift-ring-rebalance-dlpgk\" (UID: \"37565540-2b9c-4657-bc00-b79793e082a0\") " pod="swift-kuttl-tests/swift-ring-rebalance-dlpgk"
Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.139946 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/37565540-2b9c-4657-bc00-b79793e082a0-ring-data-devices\") pod \"swift-ring-rebalance-dlpgk\" (UID: \"37565540-2b9c-4657-bc00-b79793e082a0\") " pod="swift-kuttl-tests/swift-ring-rebalance-dlpgk"
Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.139992 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/37565540-2b9c-4657-bc00-b79793e082a0-scripts\") pod \"swift-ring-rebalance-dlpgk\" (UID: \"37565540-2b9c-4657-bc00-b79793e082a0\") " pod="swift-kuttl-tests/swift-ring-rebalance-dlpgk"
Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.241177 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2b27a01f-bed5-4012-ab93-63e3095aecad-scripts\") pod \"swift-ring-rebalance-7494v\" (UID: \"2b27a01f-bed5-4012-ab93-63e3095aecad\") " pod="swift-kuttl-tests/swift-ring-rebalance-7494v"
Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.241274 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jss8c\" (UniqueName: \"kubernetes.io/projected/2b27a01f-bed5-4012-ab93-63e3095aecad-kube-api-access-jss8c\") pod \"swift-ring-rebalance-7494v\" (UID: \"2b27a01f-bed5-4012-ab93-63e3095aecad\") " pod="swift-kuttl-tests/swift-ring-rebalance-7494v"
Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.241310 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/2b27a01f-bed5-4012-ab93-63e3095aecad-etc-swift\") pod \"swift-ring-rebalance-7494v\" (UID: \"2b27a01f-bed5-4012-ab93-63e3095aecad\") " pod="swift-kuttl-tests/swift-ring-rebalance-7494v"
Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.241476 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/2b27a01f-bed5-4012-ab93-63e3095aecad-ring-data-devices\") pod \"swift-ring-rebalance-7494v\" (UID: \"2b27a01f-bed5-4012-ab93-63e3095aecad\") " pod="swift-kuttl-tests/swift-ring-rebalance-7494v"
Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.241538 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x5vwn\" (UniqueName: \"kubernetes.io/projected/37565540-2b9c-4657-bc00-b79793e082a0-kube-api-access-x5vwn\") pod \"swift-ring-rebalance-dlpgk\" (UID: \"37565540-2b9c-4657-bc00-b79793e082a0\") " pod="swift-kuttl-tests/swift-ring-rebalance-dlpgk"
Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.241607 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/37565540-2b9c-4657-bc00-b79793e082a0-dispersionconf\") pod \"swift-ring-rebalance-dlpgk\" (UID: \"37565540-2b9c-4657-bc00-b79793e082a0\") " pod="swift-kuttl-tests/swift-ring-rebalance-dlpgk"
Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.241647 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/37565540-2b9c-4657-bc00-b79793e082a0-etc-swift\") pod \"swift-ring-rebalance-dlpgk\" (UID: \"37565540-2b9c-4657-bc00-b79793e082a0\") " pod="swift-kuttl-tests/swift-ring-rebalance-dlpgk"
Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.241664 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/37565540-2b9c-4657-bc00-b79793e082a0-ring-data-devices\") pod \"swift-ring-rebalance-dlpgk\" (UID: \"37565540-2b9c-4657-bc00-b79793e082a0\") " pod="swift-kuttl-tests/swift-ring-rebalance-dlpgk"
Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.241698 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/2b27a01f-bed5-4012-ab93-63e3095aecad-swiftconf\") pod \"swift-ring-rebalance-7494v\" (UID: \"2b27a01f-bed5-4012-ab93-63e3095aecad\") " pod="swift-kuttl-tests/swift-ring-rebalance-7494v"
Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.241772 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/37565540-2b9c-4657-bc00-b79793e082a0-scripts\") pod \"swift-ring-rebalance-dlpgk\" (UID: \"37565540-2b9c-4657-bc00-b79793e082a0\") " pod="swift-kuttl-tests/swift-ring-rebalance-dlpgk"
Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.241797 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/37565540-2b9c-4657-bc00-b79793e082a0-swiftconf\") pod \"swift-ring-rebalance-dlpgk\" (UID: \"37565540-2b9c-4657-bc00-b79793e082a0\") " pod="swift-kuttl-tests/swift-ring-rebalance-dlpgk"
Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.241863 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/2b27a01f-bed5-4012-ab93-63e3095aecad-dispersionconf\") pod \"swift-ring-rebalance-7494v\" (UID: \"2b27a01f-bed5-4012-ab93-63e3095aecad\") " pod="swift-kuttl-tests/swift-ring-rebalance-7494v"
Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.242680 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/37565540-2b9c-4657-bc00-b79793e082a0-ring-data-devices\") pod \"swift-ring-rebalance-dlpgk\" (UID: \"37565540-2b9c-4657-bc00-b79793e082a0\") " pod="swift-kuttl-tests/swift-ring-rebalance-dlpgk"
Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.242788 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/37565540-2b9c-4657-bc00-b79793e082a0-scripts\") pod \"swift-ring-rebalance-dlpgk\" (UID: \"37565540-2b9c-4657-bc00-b79793e082a0\") " pod="swift-kuttl-tests/swift-ring-rebalance-dlpgk"
Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.243133 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/37565540-2b9c-4657-bc00-b79793e082a0-etc-swift\") pod \"swift-ring-rebalance-dlpgk\" (UID: \"37565540-2b9c-4657-bc00-b79793e082a0\") " pod="swift-kuttl-tests/swift-ring-rebalance-dlpgk"
Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.245032 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/37565540-2b9c-4657-bc00-b79793e082a0-swiftconf\") pod \"swift-ring-rebalance-dlpgk\" (UID: \"37565540-2b9c-4657-bc00-b79793e082a0\") " pod="swift-kuttl-tests/swift-ring-rebalance-dlpgk"
Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.247555 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/37565540-2b9c-4657-bc00-b79793e082a0-dispersionconf\") pod \"swift-ring-rebalance-dlpgk\" (UID: \"37565540-2b9c-4657-bc00-b79793e082a0\") " pod="swift-kuttl-tests/swift-ring-rebalance-dlpgk"
Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.261215 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x5vwn\" (UniqueName: \"kubernetes.io/projected/37565540-2b9c-4657-bc00-b79793e082a0-kube-api-access-x5vwn\") pod \"swift-ring-rebalance-dlpgk\" (UID: \"37565540-2b9c-4657-bc00-b79793e082a0\") " pod="swift-kuttl-tests/swift-ring-rebalance-dlpgk"
Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.343838 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2b27a01f-bed5-4012-ab93-63e3095aecad-scripts\") pod
\"swift-ring-rebalance-7494v\" (UID: \"2b27a01f-bed5-4012-ab93-63e3095aecad\") " pod="swift-kuttl-tests/swift-ring-rebalance-7494v" Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.343913 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jss8c\" (UniqueName: \"kubernetes.io/projected/2b27a01f-bed5-4012-ab93-63e3095aecad-kube-api-access-jss8c\") pod \"swift-ring-rebalance-7494v\" (UID: \"2b27a01f-bed5-4012-ab93-63e3095aecad\") " pod="swift-kuttl-tests/swift-ring-rebalance-7494v" Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.343946 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/2b27a01f-bed5-4012-ab93-63e3095aecad-etc-swift\") pod \"swift-ring-rebalance-7494v\" (UID: \"2b27a01f-bed5-4012-ab93-63e3095aecad\") " pod="swift-kuttl-tests/swift-ring-rebalance-7494v" Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.343969 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/2b27a01f-bed5-4012-ab93-63e3095aecad-ring-data-devices\") pod \"swift-ring-rebalance-7494v\" (UID: \"2b27a01f-bed5-4012-ab93-63e3095aecad\") " pod="swift-kuttl-tests/swift-ring-rebalance-7494v" Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.344025 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/2b27a01f-bed5-4012-ab93-63e3095aecad-swiftconf\") pod \"swift-ring-rebalance-7494v\" (UID: \"2b27a01f-bed5-4012-ab93-63e3095aecad\") " pod="swift-kuttl-tests/swift-ring-rebalance-7494v" Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.344682 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/2b27a01f-bed5-4012-ab93-63e3095aecad-etc-swift\") pod \"swift-ring-rebalance-7494v\" (UID: \"2b27a01f-bed5-4012-ab93-63e3095aecad\") " pod="swift-kuttl-tests/swift-ring-rebalance-7494v" Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.344961 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2b27a01f-bed5-4012-ab93-63e3095aecad-scripts\") pod \"swift-ring-rebalance-7494v\" (UID: \"2b27a01f-bed5-4012-ab93-63e3095aecad\") " pod="swift-kuttl-tests/swift-ring-rebalance-7494v" Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.344964 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/2b27a01f-bed5-4012-ab93-63e3095aecad-ring-data-devices\") pod \"swift-ring-rebalance-7494v\" (UID: \"2b27a01f-bed5-4012-ab93-63e3095aecad\") " pod="swift-kuttl-tests/swift-ring-rebalance-7494v" Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.345121 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/2b27a01f-bed5-4012-ab93-63e3095aecad-dispersionconf\") pod \"swift-ring-rebalance-7494v\" (UID: \"2b27a01f-bed5-4012-ab93-63e3095aecad\") " pod="swift-kuttl-tests/swift-ring-rebalance-7494v" Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.347734 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/2b27a01f-bed5-4012-ab93-63e3095aecad-swiftconf\") pod \"swift-ring-rebalance-7494v\" (UID: \"2b27a01f-bed5-4012-ab93-63e3095aecad\") " 
pod="swift-kuttl-tests/swift-ring-rebalance-7494v" Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.349214 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/2b27a01f-bed5-4012-ab93-63e3095aecad-dispersionconf\") pod \"swift-ring-rebalance-7494v\" (UID: \"2b27a01f-bed5-4012-ab93-63e3095aecad\") " pod="swift-kuttl-tests/swift-ring-rebalance-7494v" Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.363602 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jss8c\" (UniqueName: \"kubernetes.io/projected/2b27a01f-bed5-4012-ab93-63e3095aecad-kube-api-access-jss8c\") pod \"swift-ring-rebalance-7494v\" (UID: \"2b27a01f-bed5-4012-ab93-63e3095aecad\") " pod="swift-kuttl-tests/swift-ring-rebalance-7494v" Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.450611 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-7494v" Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.547975 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/138a8739-c950-4fe4-9aaa-e232bdea3f7b-etc-swift\") pod \"swift-storage-0\" (UID: \"138a8739-c950-4fe4-9aaa-e232bdea3f7b\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:29:03 crc kubenswrapper[4753]: E0129 12:29:03.548248 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found Jan 29 12:29:03 crc kubenswrapper[4753]: E0129 12:29:03.548272 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-storage-0: configmap "swift-ring-files" not found Jan 29 12:29:03 crc kubenswrapper[4753]: E0129 12:29:03.548358 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/138a8739-c950-4fe4-9aaa-e232bdea3f7b-etc-swift podName:138a8739-c950-4fe4-9aaa-e232bdea3f7b nodeName:}" failed. No retries permitted until 2026-01-29 12:29:04.548330822 +0000 UTC m=+1358.800412267 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/138a8739-c950-4fe4-9aaa-e232bdea3f7b-etc-swift") pod "swift-storage-0" (UID: "138a8739-c950-4fe4-9aaa-e232bdea3f7b") : configmap "swift-ring-files" not found Jan 29 12:29:03 crc kubenswrapper[4753]: I0129 12:29:03.929448 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-7494v"] Jan 29 12:29:04 crc kubenswrapper[4753]: I0129 12:29:04.108959 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-ring-rebalance-7494v" event={"ID":"2b27a01f-bed5-4012-ab93-63e3095aecad","Type":"ContainerStarted","Data":"84dbb1b5424fe4c6208233c9fd586ece1455217c08696558cf49c32356d56338"} Jan 29 12:29:04 crc kubenswrapper[4753]: I0129 12:29:04.109028 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-dlpgk" Jan 29 12:29:04 crc kubenswrapper[4753]: I0129 12:29:04.122384 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-dlpgk" Jan 29 12:29:04 crc kubenswrapper[4753]: I0129 12:29:04.258322 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/37565540-2b9c-4657-bc00-b79793e082a0-ring-data-devices\") pod \"37565540-2b9c-4657-bc00-b79793e082a0\" (UID: \"37565540-2b9c-4657-bc00-b79793e082a0\") " Jan 29 12:29:04 crc kubenswrapper[4753]: I0129 12:29:04.258459 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/37565540-2b9c-4657-bc00-b79793e082a0-etc-swift\") pod \"37565540-2b9c-4657-bc00-b79793e082a0\" (UID: \"37565540-2b9c-4657-bc00-b79793e082a0\") " Jan 29 12:29:04 crc kubenswrapper[4753]: I0129 12:29:04.258525 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x5vwn\" (UniqueName: \"kubernetes.io/projected/37565540-2b9c-4657-bc00-b79793e082a0-kube-api-access-x5vwn\") pod \"37565540-2b9c-4657-bc00-b79793e082a0\" (UID: \"37565540-2b9c-4657-bc00-b79793e082a0\") " Jan 29 12:29:04 crc kubenswrapper[4753]: I0129 12:29:04.258616 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/37565540-2b9c-4657-bc00-b79793e082a0-scripts\") pod \"37565540-2b9c-4657-bc00-b79793e082a0\" (UID: \"37565540-2b9c-4657-bc00-b79793e082a0\") " Jan 29 12:29:04 crc kubenswrapper[4753]: I0129 12:29:04.258643 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/37565540-2b9c-4657-bc00-b79793e082a0-swiftconf\") pod \"37565540-2b9c-4657-bc00-b79793e082a0\" (UID: \"37565540-2b9c-4657-bc00-b79793e082a0\") " Jan 29 12:29:04 crc kubenswrapper[4753]: I0129 12:29:04.258680 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/37565540-2b9c-4657-bc00-b79793e082a0-dispersionconf\") pod \"37565540-2b9c-4657-bc00-b79793e082a0\" (UID: \"37565540-2b9c-4657-bc00-b79793e082a0\") " Jan 29 12:29:04 crc kubenswrapper[4753]: I0129 12:29:04.259385 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/37565540-2b9c-4657-bc00-b79793e082a0-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "37565540-2b9c-4657-bc00-b79793e082a0" (UID: "37565540-2b9c-4657-bc00-b79793e082a0"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:29:04 crc kubenswrapper[4753]: I0129 12:29:04.259689 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/37565540-2b9c-4657-bc00-b79793e082a0-scripts" (OuterVolumeSpecName: "scripts") pod "37565540-2b9c-4657-bc00-b79793e082a0" (UID: "37565540-2b9c-4657-bc00-b79793e082a0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:29:04 crc kubenswrapper[4753]: I0129 12:29:04.260534 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/37565540-2b9c-4657-bc00-b79793e082a0-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "37565540-2b9c-4657-bc00-b79793e082a0" (UID: "37565540-2b9c-4657-bc00-b79793e082a0"). InnerVolumeSpecName "ring-data-devices". 
Jan 29 12:29:04 crc kubenswrapper[4753]: I0129 12:29:04.262450 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/37565540-2b9c-4657-bc00-b79793e082a0-kube-api-access-x5vwn" (OuterVolumeSpecName: "kube-api-access-x5vwn") pod "37565540-2b9c-4657-bc00-b79793e082a0" (UID: "37565540-2b9c-4657-bc00-b79793e082a0"). InnerVolumeSpecName "kube-api-access-x5vwn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 12:29:04 crc kubenswrapper[4753]: I0129 12:29:04.266747 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/37565540-2b9c-4657-bc00-b79793e082a0-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "37565540-2b9c-4657-bc00-b79793e082a0" (UID: "37565540-2b9c-4657-bc00-b79793e082a0"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 12:29:04 crc kubenswrapper[4753]: I0129 12:29:04.309970 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/37565540-2b9c-4657-bc00-b79793e082a0-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "37565540-2b9c-4657-bc00-b79793e082a0" (UID: "37565540-2b9c-4657-bc00-b79793e082a0"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 12:29:04 crc kubenswrapper[4753]: I0129 12:29:04.360701 4753 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/37565540-2b9c-4657-bc00-b79793e082a0-ring-data-devices\") on node \"crc\" DevicePath \"\""
Jan 29 12:29:04 crc kubenswrapper[4753]: I0129 12:29:04.360749 4753 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/37565540-2b9c-4657-bc00-b79793e082a0-etc-swift\") on node \"crc\" DevicePath \"\""
Jan 29 12:29:04 crc kubenswrapper[4753]: I0129 12:29:04.360761 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x5vwn\" (UniqueName: \"kubernetes.io/projected/37565540-2b9c-4657-bc00-b79793e082a0-kube-api-access-x5vwn\") on node \"crc\" DevicePath \"\""
Jan 29 12:29:04 crc kubenswrapper[4753]: I0129 12:29:04.360772 4753 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/37565540-2b9c-4657-bc00-b79793e082a0-scripts\") on node \"crc\" DevicePath \"\""
Jan 29 12:29:04 crc kubenswrapper[4753]: I0129 12:29:04.360781 4753 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/37565540-2b9c-4657-bc00-b79793e082a0-swiftconf\") on node \"crc\" DevicePath \"\""
Jan 29 12:29:04 crc kubenswrapper[4753]: I0129 12:29:04.360790 4753 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/37565540-2b9c-4657-bc00-b79793e082a0-dispersionconf\") on node \"crc\" DevicePath \"\""
Jan 29 12:29:04 crc kubenswrapper[4753]: I0129 12:29:04.566031 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/138a8739-c950-4fe4-9aaa-e232bdea3f7b-etc-swift\") pod \"swift-storage-0\" (UID: \"138a8739-c950-4fe4-9aaa-e232bdea3f7b\") " pod="swift-kuttl-tests/swift-storage-0"
Jan 29 12:29:04 crc kubenswrapper[4753]: E0129 12:29:04.566519 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found
Jan 29 12:29:04 crc kubenswrapper[4753]: E0129 12:29:04.566546 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-storage-0: configmap "swift-ring-files" not found
Jan 29 12:29:04 crc kubenswrapper[4753]: E0129 12:29:04.566655 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/138a8739-c950-4fe4-9aaa-e232bdea3f7b-etc-swift podName:138a8739-c950-4fe4-9aaa-e232bdea3f7b nodeName:}" failed. No retries permitted until 2026-01-29 12:29:06.566587366 +0000 UTC m=+1360.818668821 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/138a8739-c950-4fe4-9aaa-e232bdea3f7b-etc-swift") pod "swift-storage-0" (UID: "138a8739-c950-4fe4-9aaa-e232bdea3f7b") : configmap "swift-ring-files" not found
Jan 29 12:29:05 crc kubenswrapper[4753]: I0129 12:29:05.120156 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-dlpgk"
Jan 29 12:29:05 crc kubenswrapper[4753]: I0129 12:29:05.120147 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-ring-rebalance-7494v" event={"ID":"2b27a01f-bed5-4012-ab93-63e3095aecad","Type":"ContainerStarted","Data":"5a4fc950ff1f25f77d0dd675477676b8f9f5793292681d47966e5858e7b180ce"}
Jan 29 12:29:05 crc kubenswrapper[4753]: I0129 12:29:05.149930 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="swift-kuttl-tests/swift-ring-rebalance-7494v" podStartSLOduration=2.149907352 podStartE2EDuration="2.149907352s" podCreationTimestamp="2026-01-29 12:29:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:29:05.142785981 +0000 UTC m=+1359.394867446" watchObservedRunningTime="2026-01-29 12:29:05.149907352 +0000 UTC m=+1359.401988817"
Jan 29 12:29:05 crc kubenswrapper[4753]: I0129 12:29:05.214130 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-dlpgk"]
Jan 29 12:29:05 crc kubenswrapper[4753]: I0129 12:29:05.219554 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-dlpgk"]
Jan 29 12:29:05 crc kubenswrapper[4753]: I0129 12:29:05.899684 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="37565540-2b9c-4657-bc00-b79793e082a0" path="/var/lib/kubelet/pods/37565540-2b9c-4657-bc00-b79793e082a0/volumes"
Jan 29 12:29:06 crc kubenswrapper[4753]: I0129 12:29:06.645751 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/138a8739-c950-4fe4-9aaa-e232bdea3f7b-etc-swift\") pod \"swift-storage-0\" (UID: \"138a8739-c950-4fe4-9aaa-e232bdea3f7b\") " pod="swift-kuttl-tests/swift-storage-0"
Jan 29 12:29:06 crc kubenswrapper[4753]: E0129 12:29:06.646008 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found
Jan 29 12:29:06 crc kubenswrapper[4753]: E0129 12:29:06.646161 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-storage-0: configmap "swift-ring-files" not found
Jan 29 12:29:06 crc kubenswrapper[4753]: E0129 12:29:06.646237 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/138a8739-c950-4fe4-9aaa-e232bdea3f7b-etc-swift podName:138a8739-c950-4fe4-9aaa-e232bdea3f7b nodeName:}" failed. No retries permitted until 2026-01-29 12:29:10.64620613 +0000 UTC m=+1364.898287585 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/138a8739-c950-4fe4-9aaa-e232bdea3f7b-etc-swift") pod "swift-storage-0" (UID: "138a8739-c950-4fe4-9aaa-e232bdea3f7b") : configmap "swift-ring-files" not found
Jan 29 12:29:10 crc kubenswrapper[4753]: I0129 12:29:10.743838 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/138a8739-c950-4fe4-9aaa-e232bdea3f7b-etc-swift\") pod \"swift-storage-0\" (UID: \"138a8739-c950-4fe4-9aaa-e232bdea3f7b\") " pod="swift-kuttl-tests/swift-storage-0"
Jan 29 12:29:10 crc kubenswrapper[4753]: E0129 12:29:10.744184 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found
Jan 29 12:29:10 crc kubenswrapper[4753]: E0129 12:29:10.744303 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-storage-0: configmap "swift-ring-files" not found
Jan 29 12:29:10 crc kubenswrapper[4753]: E0129 12:29:10.744417 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/138a8739-c950-4fe4-9aaa-e232bdea3f7b-etc-swift podName:138a8739-c950-4fe4-9aaa-e232bdea3f7b nodeName:}" failed. No retries permitted until 2026-01-29 12:29:18.744384974 +0000 UTC m=+1372.996466469 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/138a8739-c950-4fe4-9aaa-e232bdea3f7b-etc-swift") pod "swift-storage-0" (UID: "138a8739-c950-4fe4-9aaa-e232bdea3f7b") : configmap "swift-ring-files" not found
Jan 29 12:29:11 crc kubenswrapper[4753]: I0129 12:29:11.166591 4753 generic.go:334] "Generic (PLEG): container finished" podID="2b27a01f-bed5-4012-ab93-63e3095aecad" containerID="5a4fc950ff1f25f77d0dd675477676b8f9f5793292681d47966e5858e7b180ce" exitCode=0
Jan 29 12:29:11 crc kubenswrapper[4753]: I0129 12:29:11.166645 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-ring-rebalance-7494v" event={"ID":"2b27a01f-bed5-4012-ab93-63e3095aecad","Type":"ContainerDied","Data":"5a4fc950ff1f25f77d0dd675477676b8f9f5793292681d47966e5858e7b180ce"}
Jan 29 12:29:12 crc kubenswrapper[4753]: I0129 12:29:12.592365 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-7494v"
Jan 29 12:29:12 crc kubenswrapper[4753]: I0129 12:29:12.696455 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/2b27a01f-bed5-4012-ab93-63e3095aecad-ring-data-devices\") pod \"2b27a01f-bed5-4012-ab93-63e3095aecad\" (UID: \"2b27a01f-bed5-4012-ab93-63e3095aecad\") "
Jan 29 12:29:12 crc kubenswrapper[4753]: I0129 12:29:12.696788 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/2b27a01f-bed5-4012-ab93-63e3095aecad-dispersionconf\") pod \"2b27a01f-bed5-4012-ab93-63e3095aecad\" (UID: \"2b27a01f-bed5-4012-ab93-63e3095aecad\") "
Jan 29 12:29:12 crc kubenswrapper[4753]: I0129 12:29:12.696824 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2b27a01f-bed5-4012-ab93-63e3095aecad-scripts\") pod \"2b27a01f-bed5-4012-ab93-63e3095aecad\" (UID: \"2b27a01f-bed5-4012-ab93-63e3095aecad\") "
Jan 29 12:29:12 crc kubenswrapper[4753]: I0129 12:29:12.696852 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/2b27a01f-bed5-4012-ab93-63e3095aecad-swiftconf\") pod \"2b27a01f-bed5-4012-ab93-63e3095aecad\" (UID: \"2b27a01f-bed5-4012-ab93-63e3095aecad\") "
Jan 29 12:29:12 crc kubenswrapper[4753]: I0129 12:29:12.696929 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jss8c\" (UniqueName: \"kubernetes.io/projected/2b27a01f-bed5-4012-ab93-63e3095aecad-kube-api-access-jss8c\") pod \"2b27a01f-bed5-4012-ab93-63e3095aecad\" (UID: \"2b27a01f-bed5-4012-ab93-63e3095aecad\") "
Jan 29 12:29:12 crc kubenswrapper[4753]: I0129 12:29:12.696997 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/2b27a01f-bed5-4012-ab93-63e3095aecad-etc-swift\") pod \"2b27a01f-bed5-4012-ab93-63e3095aecad\" (UID: \"2b27a01f-bed5-4012-ab93-63e3095aecad\") "
Jan 29 12:29:12 crc kubenswrapper[4753]: I0129 12:29:12.697368 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2b27a01f-bed5-4012-ab93-63e3095aecad-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "2b27a01f-bed5-4012-ab93-63e3095aecad" (UID: "2b27a01f-bed5-4012-ab93-63e3095aecad"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 12:29:12 crc kubenswrapper[4753]: I0129 12:29:12.698122 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2b27a01f-bed5-4012-ab93-63e3095aecad-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "2b27a01f-bed5-4012-ab93-63e3095aecad" (UID: "2b27a01f-bed5-4012-ab93-63e3095aecad"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 12:29:12 crc kubenswrapper[4753]: I0129 12:29:12.702712 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2b27a01f-bed5-4012-ab93-63e3095aecad-kube-api-access-jss8c" (OuterVolumeSpecName: "kube-api-access-jss8c") pod "2b27a01f-bed5-4012-ab93-63e3095aecad" (UID: "2b27a01f-bed5-4012-ab93-63e3095aecad"). InnerVolumeSpecName "kube-api-access-jss8c". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 12:29:12 crc kubenswrapper[4753]: I0129 12:29:12.710657 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2b27a01f-bed5-4012-ab93-63e3095aecad-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "2b27a01f-bed5-4012-ab93-63e3095aecad" (UID: "2b27a01f-bed5-4012-ab93-63e3095aecad"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 12:29:12 crc kubenswrapper[4753]: I0129 12:29:12.718409 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2b27a01f-bed5-4012-ab93-63e3095aecad-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "2b27a01f-bed5-4012-ab93-63e3095aecad" (UID: "2b27a01f-bed5-4012-ab93-63e3095aecad"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 12:29:12 crc kubenswrapper[4753]: I0129 12:29:12.728731 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2b27a01f-bed5-4012-ab93-63e3095aecad-scripts" (OuterVolumeSpecName: "scripts") pod "2b27a01f-bed5-4012-ab93-63e3095aecad" (UID: "2b27a01f-bed5-4012-ab93-63e3095aecad"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 12:29:12 crc kubenswrapper[4753]: I0129 12:29:12.799107 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jss8c\" (UniqueName: \"kubernetes.io/projected/2b27a01f-bed5-4012-ab93-63e3095aecad-kube-api-access-jss8c\") on node \"crc\" DevicePath \"\""
Jan 29 12:29:12 crc kubenswrapper[4753]: I0129 12:29:12.799169 4753 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/2b27a01f-bed5-4012-ab93-63e3095aecad-etc-swift\") on node \"crc\" DevicePath \"\""
Jan 29 12:29:12 crc kubenswrapper[4753]: I0129 12:29:12.799191 4753 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/2b27a01f-bed5-4012-ab93-63e3095aecad-ring-data-devices\") on node \"crc\" DevicePath \"\""
Jan 29 12:29:12 crc kubenswrapper[4753]: I0129 12:29:12.799208 4753 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/2b27a01f-bed5-4012-ab93-63e3095aecad-dispersionconf\") on node \"crc\" DevicePath \"\""
Jan 29 12:29:12 crc kubenswrapper[4753]: I0129 12:29:12.799249 4753 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2b27a01f-bed5-4012-ab93-63e3095aecad-scripts\") on node \"crc\" DevicePath \"\""
Jan 29 12:29:12 crc kubenswrapper[4753]: I0129 12:29:12.799268 4753 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/2b27a01f-bed5-4012-ab93-63e3095aecad-swiftconf\") on node \"crc\" DevicePath \"\""
Jan 29 12:29:13 crc kubenswrapper[4753]: I0129 12:29:13.211006 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-ring-rebalance-7494v" event={"ID":"2b27a01f-bed5-4012-ab93-63e3095aecad","Type":"ContainerDied","Data":"84dbb1b5424fe4c6208233c9fd586ece1455217c08696558cf49c32356d56338"}
Jan 29 12:29:13 crc kubenswrapper[4753]: I0129 12:29:13.211183 4753 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="84dbb1b5424fe4c6208233c9fd586ece1455217c08696558cf49c32356d56338"
Jan 29 12:29:13 crc kubenswrapper[4753]: I0129 12:29:13.211481 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-7494v"
Jan 29 12:29:18 crc kubenswrapper[4753]: I0129 12:29:18.807158 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/138a8739-c950-4fe4-9aaa-e232bdea3f7b-etc-swift\") pod \"swift-storage-0\" (UID: \"138a8739-c950-4fe4-9aaa-e232bdea3f7b\") " pod="swift-kuttl-tests/swift-storage-0"
Jan 29 12:29:18 crc kubenswrapper[4753]: I0129 12:29:18.819221 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/138a8739-c950-4fe4-9aaa-e232bdea3f7b-etc-swift\") pod \"swift-storage-0\" (UID: \"138a8739-c950-4fe4-9aaa-e232bdea3f7b\") " pod="swift-kuttl-tests/swift-storage-0"
Jan 29 12:29:18 crc kubenswrapper[4753]: I0129 12:29:18.901284 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-storage-0"
Jan 29 12:29:20 crc kubenswrapper[4753]: I0129 12:29:20.032684 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-storage-0"]
Jan 29 12:29:20 crc kubenswrapper[4753]: I0129 12:29:20.285334 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"138a8739-c950-4fe4-9aaa-e232bdea3f7b","Type":"ContainerStarted","Data":"d2af7af180031de8fdd0983850a21bbb0dc7bc730ec666598e572f49cb56c2a4"}
Jan 29 12:29:20 crc kubenswrapper[4753]: I0129 12:29:20.285392 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"138a8739-c950-4fe4-9aaa-e232bdea3f7b","Type":"ContainerStarted","Data":"04e8b885bf5708fa3401f1a27e8523229ca977ae09514545c5098a56e2dca64b"}
Jan 29 12:29:21 crc kubenswrapper[4753]: I0129 12:29:21.325479 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"138a8739-c950-4fe4-9aaa-e232bdea3f7b","Type":"ContainerStarted","Data":"aae9e4a99411d122b8ed7b019b9e7329b1b82581947c665733e25cb2ccf08256"}
Jan 29 12:29:21 crc kubenswrapper[4753]: I0129 12:29:21.325912 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"138a8739-c950-4fe4-9aaa-e232bdea3f7b","Type":"ContainerStarted","Data":"9bda9aa7a80e8ba90786ee356d5874c29c2779adfc8d54c3a2dccdc45759191f"}
Jan 29 12:29:21 crc kubenswrapper[4753]: I0129 12:29:21.325932 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"138a8739-c950-4fe4-9aaa-e232bdea3f7b","Type":"ContainerStarted","Data":"b31c0f4794d0d19b53ee6f57a63b973e041e1db6694a61130bdd7106c83c286a"}
Jan 29 12:29:21 crc kubenswrapper[4753]: I0129 12:29:21.325943 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"138a8739-c950-4fe4-9aaa-e232bdea3f7b","Type":"ContainerStarted","Data":"87187568015a0e156148a44f379441b4861da67970bece8b4474914aa31f9dba"}
Jan 29 12:29:21 crc kubenswrapper[4753]: I0129 12:29:21.325953 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"138a8739-c950-4fe4-9aaa-e232bdea3f7b","Type":"ContainerStarted","Data":"cea3edd31c2b81739cf355a1b06782457367cb6df6f5563b24c73f09b71ddbf9"}
Jan 29 12:29:21 crc kubenswrapper[4753]: I0129 12:29:21.325964 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"138a8739-c950-4fe4-9aaa-e232bdea3f7b","Type":"ContainerStarted","Data":"da235b77fd3100fc09167c2bfa1fe6dcd9d3796d72e3c97fa902c454142b0594"}
Jan 29 12:29:21 crc kubenswrapper[4753]: I0129 12:29:21.325975 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"138a8739-c950-4fe4-9aaa-e232bdea3f7b","Type":"ContainerStarted","Data":"6fb444f8a28cd99977a4bf8425096045f77ba4946835c63cbdeadef200038963"}
Jan 29 12:29:22 crc kubenswrapper[4753]: I0129 12:29:22.341953 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"138a8739-c950-4fe4-9aaa-e232bdea3f7b","Type":"ContainerStarted","Data":"40737a21840cfda7ec2c3c77af6e8921570fae2535ec52d19c97b4e32a6925e5"}
Jan 29 12:29:22 crc kubenswrapper[4753]: I0129 12:29:22.342233 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"138a8739-c950-4fe4-9aaa-e232bdea3f7b","Type":"ContainerStarted","Data":"5b83e1721f6dc650c1c22675c76d28414a7df673068b610229a08310233f3430"}
Jan 29 12:29:22 crc kubenswrapper[4753]: I0129 12:29:22.342244 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"138a8739-c950-4fe4-9aaa-e232bdea3f7b","Type":"ContainerStarted","Data":"c3746f53cf45602790eb986ea13b559998358657e337ca9a64e63b7065410941"}
Jan 29 12:29:22 crc kubenswrapper[4753]: I0129 12:29:22.342253 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"138a8739-c950-4fe4-9aaa-e232bdea3f7b","Type":"ContainerStarted","Data":"e5782ff28242ca4b5c6270a37148829832413394f867d4c8a8b48f28cd77d95b"}
Jan 29 12:29:22 crc kubenswrapper[4753]: I0129 12:29:22.342261 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"138a8739-c950-4fe4-9aaa-e232bdea3f7b","Type":"ContainerStarted","Data":"83c3bedf95a58e8d14219b47ae5fb9e7f9e6c837be7170b58c5b00ab6bb13bd3"}
Jan 29 12:29:22 crc kubenswrapper[4753]: I0129 12:29:22.342269 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"138a8739-c950-4fe4-9aaa-e232bdea3f7b","Type":"ContainerStarted","Data":"e9fae1d0304d1a6d261a833051d3006fa143b16cd6d6e8cbeb4a066535097794"}
Jan 29 12:29:23 crc kubenswrapper[4753]: I0129 12:29:23.361462 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"138a8739-c950-4fe4-9aaa-e232bdea3f7b","Type":"ContainerStarted","Data":"bb19f71c8e939cc414644c25eaf5efc7de06e81b24e457c1ea9f7a902dcce561"}
Jan 29 12:29:23 crc kubenswrapper[4753]: I0129 12:29:23.361535 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"138a8739-c950-4fe4-9aaa-e232bdea3f7b","Type":"ContainerStarted","Data":"7fcbcb12053c0515810e35d7051fa5695d83d9eb527057e515134b63851a5c9a"}
Jan 29 12:29:23 crc kubenswrapper[4753]: I0129 12:29:23.405533 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="swift-kuttl-tests/swift-storage-0" podStartSLOduration=22.405498275 podStartE2EDuration="22.405498275s" podCreationTimestamp="2026-01-29 12:29:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:29:23.404208438 +0000 UTC m=+1377.656289893" watchObservedRunningTime="2026-01-29 12:29:23.405498275 +0000 UTC m=+1377.657579730"
Jan 29 12:29:28 crc kubenswrapper[4753]: I0129 12:29:28.721401 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/swift-proxy-7769498bdc-462lm"]
Jan 29 12:29:28 crc kubenswrapper[4753]: E0129 12:29:28.722293 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b27a01f-bed5-4012-ab93-63e3095aecad" containerName="swift-ring-rebalance"
Jan 29 12:29:28 crc kubenswrapper[4753]: I0129 12:29:28.722307 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b27a01f-bed5-4012-ab93-63e3095aecad" containerName="swift-ring-rebalance"
Jan 29 12:29:28 crc kubenswrapper[4753]: I0129 12:29:28.722478 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="2b27a01f-bed5-4012-ab93-63e3095aecad" containerName="swift-ring-rebalance"
Jan 29 12:29:28 crc kubenswrapper[4753]: I0129 12:29:28.723194 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-proxy-7769498bdc-462lm"
Jan 29 12:29:28 crc kubenswrapper[4753]: I0129 12:29:28.725711 4753 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"swift-proxy-config-data"
Jan 29 12:29:28 crc kubenswrapper[4753]: I0129 12:29:28.746468 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-proxy-7769498bdc-462lm"]
Jan 29 12:29:28 crc kubenswrapper[4753]: I0129 12:29:28.792138 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gqj2k\" (UniqueName: \"kubernetes.io/projected/9a39086c-d994-44cb-a300-ee10175b234e-kube-api-access-gqj2k\") pod \"swift-proxy-7769498bdc-462lm\" (UID: \"9a39086c-d994-44cb-a300-ee10175b234e\") " pod="swift-kuttl-tests/swift-proxy-7769498bdc-462lm"
Jan 29 12:29:28 crc kubenswrapper[4753]: I0129 12:29:28.792300 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9a39086c-d994-44cb-a300-ee10175b234e-run-httpd\") pod \"swift-proxy-7769498bdc-462lm\" (UID: \"9a39086c-d994-44cb-a300-ee10175b234e\") " pod="swift-kuttl-tests/swift-proxy-7769498bdc-462lm"
Jan 29 12:29:28 crc kubenswrapper[4753]: I0129 12:29:28.792326 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9a39086c-d994-44cb-a300-ee10175b234e-config-data\") pod \"swift-proxy-7769498bdc-462lm\" (UID: \"9a39086c-d994-44cb-a300-ee10175b234e\") " pod="swift-kuttl-tests/swift-proxy-7769498bdc-462lm"
Jan 29 12:29:28 crc kubenswrapper[4753]: I0129 12:29:28.792419 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/9a39086c-d994-44cb-a300-ee10175b234e-etc-swift\") pod \"swift-proxy-7769498bdc-462lm\" (UID: \"9a39086c-d994-44cb-a300-ee10175b234e\") " pod="swift-kuttl-tests/swift-proxy-7769498bdc-462lm"
Jan 29 12:29:28 crc kubenswrapper[4753]: I0129 12:29:28.792535 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9a39086c-d994-44cb-a300-ee10175b234e-log-httpd\") pod \"swift-proxy-7769498bdc-462lm\" (UID: \"9a39086c-d994-44cb-a300-ee10175b234e\") " pod="swift-kuttl-tests/swift-proxy-7769498bdc-462lm"
Jan 29 12:29:28 crc kubenswrapper[4753]: I0129 12:29:28.893899 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gqj2k\" (UniqueName: \"kubernetes.io/projected/9a39086c-d994-44cb-a300-ee10175b234e-kube-api-access-gqj2k\") pod \"swift-proxy-7769498bdc-462lm\" (UID: \"9a39086c-d994-44cb-a300-ee10175b234e\") " pod="swift-kuttl-tests/swift-proxy-7769498bdc-462lm"
Jan 29 12:29:28 crc kubenswrapper[4753]: I0129 12:29:28.893986 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9a39086c-d994-44cb-a300-ee10175b234e-run-httpd\") pod \"swift-proxy-7769498bdc-462lm\" (UID: \"9a39086c-d994-44cb-a300-ee10175b234e\") " pod="swift-kuttl-tests/swift-proxy-7769498bdc-462lm"
Jan 29 12:29:28 crc kubenswrapper[4753]: I0129 12:29:28.894011 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9a39086c-d994-44cb-a300-ee10175b234e-config-data\") pod \"swift-proxy-7769498bdc-462lm\" (UID: \"9a39086c-d994-44cb-a300-ee10175b234e\") " pod="swift-kuttl-tests/swift-proxy-7769498bdc-462lm"
Jan 29 12:29:28 crc kubenswrapper[4753]: I0129 12:29:28.894060 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/9a39086c-d994-44cb-a300-ee10175b234e-etc-swift\") pod \"swift-proxy-7769498bdc-462lm\" (UID: \"9a39086c-d994-44cb-a300-ee10175b234e\") " pod="swift-kuttl-tests/swift-proxy-7769498bdc-462lm"
Jan 29 12:29:28 crc kubenswrapper[4753]: I0129 12:29:28.894154 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9a39086c-d994-44cb-a300-ee10175b234e-log-httpd\") pod \"swift-proxy-7769498bdc-462lm\" (UID: \"9a39086c-d994-44cb-a300-ee10175b234e\") " pod="swift-kuttl-tests/swift-proxy-7769498bdc-462lm"
Jan 29 12:29:28 crc kubenswrapper[4753]: I0129 12:29:28.894686 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9a39086c-d994-44cb-a300-ee10175b234e-log-httpd\") pod \"swift-proxy-7769498bdc-462lm\" (UID: \"9a39086c-d994-44cb-a300-ee10175b234e\") " pod="swift-kuttl-tests/swift-proxy-7769498bdc-462lm"
Jan 29 12:29:28 crc kubenswrapper[4753]: I0129 12:29:28.894812 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9a39086c-d994-44cb-a300-ee10175b234e-run-httpd\") pod \"swift-proxy-7769498bdc-462lm\" (UID: \"9a39086c-d994-44cb-a300-ee10175b234e\") " pod="swift-kuttl-tests/swift-proxy-7769498bdc-462lm"
Jan 29 12:29:28 crc kubenswrapper[4753]: I0129 12:29:28.901051 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9a39086c-d994-44cb-a300-ee10175b234e-config-data\") pod \"swift-proxy-7769498bdc-462lm\" (UID: \"9a39086c-d994-44cb-a300-ee10175b234e\") " pod="swift-kuttl-tests/swift-proxy-7769498bdc-462lm"
Jan 29 12:29:28 crc kubenswrapper[4753]: I0129 12:29:28.911083 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/9a39086c-d994-44cb-a300-ee10175b234e-etc-swift\") pod \"swift-proxy-7769498bdc-462lm\" (UID: \"9a39086c-d994-44cb-a300-ee10175b234e\") " pod="swift-kuttl-tests/swift-proxy-7769498bdc-462lm"
Jan 29 12:29:28 crc kubenswrapper[4753]: I0129 12:29:28.913461 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gqj2k\" (UniqueName: \"kubernetes.io/projected/9a39086c-d994-44cb-a300-ee10175b234e-kube-api-access-gqj2k\") pod \"swift-proxy-7769498bdc-462lm\" (UID: \"9a39086c-d994-44cb-a300-ee10175b234e\") " pod="swift-kuttl-tests/swift-proxy-7769498bdc-462lm"
Jan 29 12:29:29 crc kubenswrapper[4753]: I0129 12:29:29.045982 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-proxy-7769498bdc-462lm"
Jan 29 12:29:29 crc kubenswrapper[4753]: I0129 12:29:29.252513 4753 patch_prober.go:28] interesting pod/machine-config-daemon-7c24x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 12:29:29 crc kubenswrapper[4753]: I0129 12:29:29.252582 4753 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 12:29:29 crc kubenswrapper[4753]: I0129 12:29:29.499932 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-proxy-7769498bdc-462lm"]
Jan 29 12:29:30 crc kubenswrapper[4753]: I0129 12:29:30.437002 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7769498bdc-462lm" event={"ID":"9a39086c-d994-44cb-a300-ee10175b234e","Type":"ContainerStarted","Data":"2297f3f32e7f61f7be1ba11eb96ea1b22f792effd89eaabd82a9e04ea69b8093"}
Jan 29 12:29:30 crc kubenswrapper[4753]: I0129 12:29:30.437625 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="swift-kuttl-tests/swift-proxy-7769498bdc-462lm"
Jan 29 12:29:30 crc kubenswrapper[4753]: I0129 12:29:30.437659 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7769498bdc-462lm" event={"ID":"9a39086c-d994-44cb-a300-ee10175b234e","Type":"ContainerStarted","Data":"70c7c82643874e91a115ede43c1fc951dbdee8c165507e3453504fe71e2b17bb"}
Jan 29 12:29:30 crc kubenswrapper[4753]: I0129 12:29:30.437678 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7769498bdc-462lm" event={"ID":"9a39086c-d994-44cb-a300-ee10175b234e","Type":"ContainerStarted","Data":"308bfb203819c7445a07b64d248d4e477e48663108d8025ef429e0f8daede5f0"}
Jan 29 12:29:30 crc kubenswrapper[4753]: I0129 12:29:30.466550 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="swift-kuttl-tests/swift-proxy-7769498bdc-462lm" podStartSLOduration=2.466522515 podStartE2EDuration="2.466522515s" podCreationTimestamp="2026-01-29 12:29:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:29:30.461298367 +0000 UTC m=+1384.713379832" watchObservedRunningTime="2026-01-29 12:29:30.466522515 +0000 UTC m=+1384.718603970"
Jan 29 12:29:31 crc kubenswrapper[4753]: I0129 12:29:31.445433 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="swift-kuttl-tests/swift-proxy-7769498bdc-462lm"
Jan 29 12:29:34 crc kubenswrapper[4753]: I0129 12:29:34.829104 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="swift-kuttl-tests/swift-proxy-7769498bdc-462lm"
Jan 29 12:29:34 crc kubenswrapper[4753]: I0129 12:29:34.906407 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="swift-kuttl-tests/swift-proxy-7769498bdc-462lm"
Jan 29 12:29:36 crc kubenswrapper[4753]: I0129 12:29:36.918713 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-debug-vrdh8"]
Jan 29 12:29:36 crc kubenswrapper[4753]: I0129 12:29:36.920681 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-debug-vrdh8"
Jan 29 12:29:36 crc kubenswrapper[4753]: I0129 12:29:36.924516 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"swift-kuttl-tests"/"swift-ring-config-data"
Jan 29 12:29:36 crc kubenswrapper[4753]: I0129 12:29:36.924518 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"swift-kuttl-tests"/"swift-ring-scripts"
Jan 29 12:29:36 crc kubenswrapper[4753]: I0129 12:29:36.934356 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-debug-vrdh8"]
Jan 29 12:29:37 crc kubenswrapper[4753]: I0129 12:29:37.105976 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/8599dd71-db90-4431-a3b9-6a4dab2feb04-etc-swift\") pod \"swift-ring-rebalance-debug-vrdh8\" (UID: \"8599dd71-db90-4431-a3b9-6a4dab2feb04\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-vrdh8"
Jan 29 12:29:37 crc kubenswrapper[4753]: I0129 12:29:37.106097 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/8599dd71-db90-4431-a3b9-6a4dab2feb04-dispersionconf\") pod \"swift-ring-rebalance-debug-vrdh8\" (UID: \"8599dd71-db90-4431-a3b9-6a4dab2feb04\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-vrdh8"
Jan 29 12:29:37 crc kubenswrapper[4753]: I0129 12:29:37.106299 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8599dd71-db90-4431-a3b9-6a4dab2feb04-scripts\") pod \"swift-ring-rebalance-debug-vrdh8\" (UID: \"8599dd71-db90-4431-a3b9-6a4dab2feb04\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-vrdh8"
Jan 29 12:29:37 crc kubenswrapper[4753]: I0129 12:29:37.106540 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/8599dd71-db90-4431-a3b9-6a4dab2feb04-ring-data-devices\") pod \"swift-ring-rebalance-debug-vrdh8\" (UID: \"8599dd71-db90-4431-a3b9-6a4dab2feb04\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-vrdh8"
Jan 29 12:29:37 crc kubenswrapper[4753]: I0129 12:29:37.106818 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/8599dd71-db90-4431-a3b9-6a4dab2feb04-swiftconf\") pod \"swift-ring-rebalance-debug-vrdh8\" (UID: \"8599dd71-db90-4431-a3b9-6a4dab2feb04\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-vrdh8"
Jan 29 12:29:37 crc kubenswrapper[4753]: I0129 12:29:37.106962 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d4zbp\" (UniqueName: \"kubernetes.io/projected/8599dd71-db90-4431-a3b9-6a4dab2feb04-kube-api-access-d4zbp\") pod \"swift-ring-rebalance-debug-vrdh8\" (UID: \"8599dd71-db90-4431-a3b9-6a4dab2feb04\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-vrdh8"
Jan 29 12:29:37 crc kubenswrapper[4753]: I0129 12:29:37.208905 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/8599dd71-db90-4431-a3b9-6a4dab2feb04-ring-data-devices\") pod \"swift-ring-rebalance-debug-vrdh8\" (UID: \"8599dd71-db90-4431-a3b9-6a4dab2feb04\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-vrdh8"
Jan 29 12:29:37 crc kubenswrapper[4753]: I0129 12:29:37.209303 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/8599dd71-db90-4431-a3b9-6a4dab2feb04-swiftconf\") pod \"swift-ring-rebalance-debug-vrdh8\" (UID: \"8599dd71-db90-4431-a3b9-6a4dab2feb04\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-vrdh8"
Jan 29 12:29:37 crc kubenswrapper[4753]: I0129 12:29:37.209377 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d4zbp\" (UniqueName: \"kubernetes.io/projected/8599dd71-db90-4431-a3b9-6a4dab2feb04-kube-api-access-d4zbp\") pod \"swift-ring-rebalance-debug-vrdh8\" (UID: \"8599dd71-db90-4431-a3b9-6a4dab2feb04\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-vrdh8"
Jan 29 12:29:37 crc kubenswrapper[4753]: I0129 12:29:37.209458 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/8599dd71-db90-4431-a3b9-6a4dab2feb04-etc-swift\") pod \"swift-ring-rebalance-debug-vrdh8\" (UID: \"8599dd71-db90-4431-a3b9-6a4dab2feb04\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-vrdh8"
Jan 29 12:29:37 crc kubenswrapper[4753]: I0129 12:29:37.209475 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/8599dd71-db90-4431-a3b9-6a4dab2feb04-dispersionconf\") pod \"swift-ring-rebalance-debug-vrdh8\" (UID: \"8599dd71-db90-4431-a3b9-6a4dab2feb04\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-vrdh8"
Jan 29 12:29:37 crc kubenswrapper[4753]: I0129 12:29:37.209497 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8599dd71-db90-4431-a3b9-6a4dab2feb04-scripts\") pod \"swift-ring-rebalance-debug-vrdh8\" (UID: \"8599dd71-db90-4431-a3b9-6a4dab2feb04\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-vrdh8"
Jan 29 12:29:37 crc kubenswrapper[4753]: I0129 12:29:37.209895 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/8599dd71-db90-4431-a3b9-6a4dab2feb04-ring-data-devices\") pod \"swift-ring-rebalance-debug-vrdh8\" (UID: \"8599dd71-db90-4431-a3b9-6a4dab2feb04\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-vrdh8"
Jan 29 12:29:37 crc kubenswrapper[4753]: I0129 12:29:37.210108 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/8599dd71-db90-4431-a3b9-6a4dab2feb04-etc-swift\") pod \"swift-ring-rebalance-debug-vrdh8\" (UID: \"8599dd71-db90-4431-a3b9-6a4dab2feb04\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-vrdh8"
Jan 29 12:29:37 crc kubenswrapper[4753]: I0129 12:29:37.210450 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8599dd71-db90-4431-a3b9-6a4dab2feb04-scripts\") pod \"swift-ring-rebalance-debug-vrdh8\" (UID: \"8599dd71-db90-4431-a3b9-6a4dab2feb04\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-vrdh8"
Jan 29 12:29:37 crc kubenswrapper[4753]: I0129 12:29:37.216066 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/8599dd71-db90-4431-a3b9-6a4dab2feb04-swiftconf\") pod \"swift-ring-rebalance-debug-vrdh8\" (UID: \"8599dd71-db90-4431-a3b9-6a4dab2feb04\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-vrdh8"
Jan 29 12:29:37 crc kubenswrapper[4753]: I0129 12:29:37.216066 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/8599dd71-db90-4431-a3b9-6a4dab2feb04-dispersionconf\") pod \"swift-ring-rebalance-debug-vrdh8\" (UID: \"8599dd71-db90-4431-a3b9-6a4dab2feb04\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-vrdh8"
Jan 29 12:29:37 crc kubenswrapper[4753]: I0129 12:29:37.226965 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d4zbp\" (UniqueName: \"kubernetes.io/projected/8599dd71-db90-4431-a3b9-6a4dab2feb04-kube-api-access-d4zbp\") pod \"swift-ring-rebalance-debug-vrdh8\" (UID: \"8599dd71-db90-4431-a3b9-6a4dab2feb04\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-vrdh8"
Jan 29 12:29:37 crc kubenswrapper[4753]: I0129 12:29:37.245015 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-debug-vrdh8"
Jan 29 12:29:37 crc kubenswrapper[4753]: I0129 12:29:37.693831 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-debug-vrdh8"]
Jan 29 12:29:38 crc kubenswrapper[4753]: I0129 12:29:38.556111 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-ring-rebalance-debug-vrdh8" event={"ID":"8599dd71-db90-4431-a3b9-6a4dab2feb04","Type":"ContainerStarted","Data":"3791fb6ba9c11ddfe0a0e2b33aa5070e1fa3a259202dd953348f149fe308359c"}
Jan 29 12:29:38 crc kubenswrapper[4753]: I0129 12:29:38.556536 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-ring-rebalance-debug-vrdh8" event={"ID":"8599dd71-db90-4431-a3b9-6a4dab2feb04","Type":"ContainerStarted","Data":"a6f3a367cfacd3744a8d8566bc631bfe8713c36944d8c9072a8c1dcc7e4b9eae"}
Jan 29 12:29:40 crc kubenswrapper[4753]: I0129 12:29:40.573568 4753 generic.go:334] "Generic (PLEG): container finished" podID="8599dd71-db90-4431-a3b9-6a4dab2feb04" containerID="3791fb6ba9c11ddfe0a0e2b33aa5070e1fa3a259202dd953348f149fe308359c" exitCode=0
Jan 29 12:29:40 crc kubenswrapper[4753]: I0129 12:29:40.573614 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-ring-rebalance-debug-vrdh8" event={"ID":"8599dd71-db90-4431-a3b9-6a4dab2feb04","Type":"ContainerDied","Data":"3791fb6ba9c11ddfe0a0e2b33aa5070e1fa3a259202dd953348f149fe308359c"}
Jan 29 12:29:42 crc kubenswrapper[4753]: I0129 12:29:42.003631 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-debug-vrdh8"
Jan 29 12:29:42 crc kubenswrapper[4753]: I0129 12:29:42.037362 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-debug-vrdh8"]
Jan 29 12:29:42 crc kubenswrapper[4753]: I0129 12:29:42.044007 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-debug-vrdh8"]
Jan 29 12:29:42 crc kubenswrapper[4753]: I0129 12:29:42.181378 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-debug-dkb5h"]
Jan 29 12:29:42 crc kubenswrapper[4753]: E0129 12:29:42.181964 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8599dd71-db90-4431-a3b9-6a4dab2feb04" containerName="swift-ring-rebalance"
Jan 29 12:29:42 crc kubenswrapper[4753]: I0129 12:29:42.182028 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="8599dd71-db90-4431-a3b9-6a4dab2feb04" containerName="swift-ring-rebalance"
Jan 29 12:29:42 crc kubenswrapper[4753]: I0129 12:29:42.182439 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="8599dd71-db90-4431-a3b9-6a4dab2feb04" containerName="swift-ring-rebalance"
Jan 29 12:29:42 crc kubenswrapper[4753]: I0129 12:29:42.183274 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-debug-dkb5h"
Jan 29 12:29:42 crc kubenswrapper[4753]: I0129 12:29:42.190166 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-debug-dkb5h"]
Jan 29 12:29:42 crc kubenswrapper[4753]: I0129 12:29:42.200870 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8599dd71-db90-4431-a3b9-6a4dab2feb04-scripts\") pod \"8599dd71-db90-4431-a3b9-6a4dab2feb04\" (UID: \"8599dd71-db90-4431-a3b9-6a4dab2feb04\") "
Jan 29 12:29:42 crc kubenswrapper[4753]: I0129 12:29:42.201031 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/8599dd71-db90-4431-a3b9-6a4dab2feb04-etc-swift\") pod \"8599dd71-db90-4431-a3b9-6a4dab2feb04\" (UID: \"8599dd71-db90-4431-a3b9-6a4dab2feb04\") "
Jan 29 12:29:42 crc kubenswrapper[4753]: I0129 12:29:42.201101 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/8599dd71-db90-4431-a3b9-6a4dab2feb04-swiftconf\") pod \"8599dd71-db90-4431-a3b9-6a4dab2feb04\" (UID: \"8599dd71-db90-4431-a3b9-6a4dab2feb04\") "
Jan 29 12:29:42 crc kubenswrapper[4753]: I0129 12:29:42.201122 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/8599dd71-db90-4431-a3b9-6a4dab2feb04-dispersionconf\") pod \"8599dd71-db90-4431-a3b9-6a4dab2feb04\" (UID: \"8599dd71-db90-4431-a3b9-6a4dab2feb04\") "
Jan 29 12:29:42 crc kubenswrapper[4753]: I0129 12:29:42.201163 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/8599dd71-db90-4431-a3b9-6a4dab2feb04-ring-data-devices\") pod \"8599dd71-db90-4431-a3b9-6a4dab2feb04\" (UID: \"8599dd71-db90-4431-a3b9-6a4dab2feb04\") "
Jan 29 12:29:42 crc kubenswrapper[4753]: I0129 12:29:42.201185 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4zbp\" (UniqueName: 
\"kubernetes.io/projected/8599dd71-db90-4431-a3b9-6a4dab2feb04-kube-api-access-d4zbp\") pod \"8599dd71-db90-4431-a3b9-6a4dab2feb04\" (UID: \"8599dd71-db90-4431-a3b9-6a4dab2feb04\") " Jan 29 12:29:42 crc kubenswrapper[4753]: I0129 12:29:42.201392 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/c147b776-3de6-4314-a2b0-c2a8a639a0e8-etc-swift\") pod \"swift-ring-rebalance-debug-dkb5h\" (UID: \"c147b776-3de6-4314-a2b0-c2a8a639a0e8\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-dkb5h" Jan 29 12:29:42 crc kubenswrapper[4753]: I0129 12:29:42.201444 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c147b776-3de6-4314-a2b0-c2a8a639a0e8-scripts\") pod \"swift-ring-rebalance-debug-dkb5h\" (UID: \"c147b776-3de6-4314-a2b0-c2a8a639a0e8\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-dkb5h" Jan 29 12:29:42 crc kubenswrapper[4753]: I0129 12:29:42.201489 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/c147b776-3de6-4314-a2b0-c2a8a639a0e8-dispersionconf\") pod \"swift-ring-rebalance-debug-dkb5h\" (UID: \"c147b776-3de6-4314-a2b0-c2a8a639a0e8\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-dkb5h" Jan 29 12:29:42 crc kubenswrapper[4753]: I0129 12:29:42.201520 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/c147b776-3de6-4314-a2b0-c2a8a639a0e8-ring-data-devices\") pod \"swift-ring-rebalance-debug-dkb5h\" (UID: \"c147b776-3de6-4314-a2b0-c2a8a639a0e8\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-dkb5h" Jan 29 12:29:42 crc kubenswrapper[4753]: I0129 12:29:42.201579 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/c147b776-3de6-4314-a2b0-c2a8a639a0e8-swiftconf\") pod \"swift-ring-rebalance-debug-dkb5h\" (UID: \"c147b776-3de6-4314-a2b0-c2a8a639a0e8\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-dkb5h" Jan 29 12:29:42 crc kubenswrapper[4753]: I0129 12:29:42.201634 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dg7bn\" (UniqueName: \"kubernetes.io/projected/c147b776-3de6-4314-a2b0-c2a8a639a0e8-kube-api-access-dg7bn\") pod \"swift-ring-rebalance-debug-dkb5h\" (UID: \"c147b776-3de6-4314-a2b0-c2a8a639a0e8\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-dkb5h" Jan 29 12:29:42 crc kubenswrapper[4753]: I0129 12:29:42.202734 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8599dd71-db90-4431-a3b9-6a4dab2feb04-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "8599dd71-db90-4431-a3b9-6a4dab2feb04" (UID: "8599dd71-db90-4431-a3b9-6a4dab2feb04"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:29:42 crc kubenswrapper[4753]: I0129 12:29:42.203523 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8599dd71-db90-4431-a3b9-6a4dab2feb04-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "8599dd71-db90-4431-a3b9-6a4dab2feb04" (UID: "8599dd71-db90-4431-a3b9-6a4dab2feb04"). InnerVolumeSpecName "ring-data-devices". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:29:42 crc kubenswrapper[4753]: I0129 12:29:42.232274 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8599dd71-db90-4431-a3b9-6a4dab2feb04-kube-api-access-d4zbp" (OuterVolumeSpecName: "kube-api-access-d4zbp") pod "8599dd71-db90-4431-a3b9-6a4dab2feb04" (UID: "8599dd71-db90-4431-a3b9-6a4dab2feb04"). InnerVolumeSpecName "kube-api-access-d4zbp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:29:42 crc kubenswrapper[4753]: I0129 12:29:42.233530 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8599dd71-db90-4431-a3b9-6a4dab2feb04-scripts" (OuterVolumeSpecName: "scripts") pod "8599dd71-db90-4431-a3b9-6a4dab2feb04" (UID: "8599dd71-db90-4431-a3b9-6a4dab2feb04"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:29:42 crc kubenswrapper[4753]: I0129 12:29:42.235775 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8599dd71-db90-4431-a3b9-6a4dab2feb04-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "8599dd71-db90-4431-a3b9-6a4dab2feb04" (UID: "8599dd71-db90-4431-a3b9-6a4dab2feb04"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:29:42 crc kubenswrapper[4753]: I0129 12:29:42.239660 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8599dd71-db90-4431-a3b9-6a4dab2feb04-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "8599dd71-db90-4431-a3b9-6a4dab2feb04" (UID: "8599dd71-db90-4431-a3b9-6a4dab2feb04"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:29:42 crc kubenswrapper[4753]: I0129 12:29:42.303480 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/c147b776-3de6-4314-a2b0-c2a8a639a0e8-etc-swift\") pod \"swift-ring-rebalance-debug-dkb5h\" (UID: \"c147b776-3de6-4314-a2b0-c2a8a639a0e8\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-dkb5h" Jan 29 12:29:42 crc kubenswrapper[4753]: I0129 12:29:42.303552 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c147b776-3de6-4314-a2b0-c2a8a639a0e8-scripts\") pod \"swift-ring-rebalance-debug-dkb5h\" (UID: \"c147b776-3de6-4314-a2b0-c2a8a639a0e8\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-dkb5h" Jan 29 12:29:42 crc kubenswrapper[4753]: I0129 12:29:42.303608 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/c147b776-3de6-4314-a2b0-c2a8a639a0e8-ring-data-devices\") pod \"swift-ring-rebalance-debug-dkb5h\" (UID: \"c147b776-3de6-4314-a2b0-c2a8a639a0e8\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-dkb5h" Jan 29 12:29:42 crc kubenswrapper[4753]: I0129 12:29:42.303634 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/c147b776-3de6-4314-a2b0-c2a8a639a0e8-dispersionconf\") pod \"swift-ring-rebalance-debug-dkb5h\" (UID: \"c147b776-3de6-4314-a2b0-c2a8a639a0e8\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-dkb5h" Jan 29 12:29:42 crc kubenswrapper[4753]: I0129 12:29:42.303694 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" 
(UniqueName: \"kubernetes.io/secret/c147b776-3de6-4314-a2b0-c2a8a639a0e8-swiftconf\") pod \"swift-ring-rebalance-debug-dkb5h\" (UID: \"c147b776-3de6-4314-a2b0-c2a8a639a0e8\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-dkb5h" Jan 29 12:29:42 crc kubenswrapper[4753]: I0129 12:29:42.303742 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dg7bn\" (UniqueName: \"kubernetes.io/projected/c147b776-3de6-4314-a2b0-c2a8a639a0e8-kube-api-access-dg7bn\") pod \"swift-ring-rebalance-debug-dkb5h\" (UID: \"c147b776-3de6-4314-a2b0-c2a8a639a0e8\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-dkb5h" Jan 29 12:29:42 crc kubenswrapper[4753]: I0129 12:29:42.303813 4753 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/8599dd71-db90-4431-a3b9-6a4dab2feb04-ring-data-devices\") on node \"crc\" DevicePath \"\"" Jan 29 12:29:42 crc kubenswrapper[4753]: I0129 12:29:42.303828 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4zbp\" (UniqueName: \"kubernetes.io/projected/8599dd71-db90-4431-a3b9-6a4dab2feb04-kube-api-access-d4zbp\") on node \"crc\" DevicePath \"\"" Jan 29 12:29:42 crc kubenswrapper[4753]: I0129 12:29:42.303845 4753 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8599dd71-db90-4431-a3b9-6a4dab2feb04-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:29:42 crc kubenswrapper[4753]: I0129 12:29:42.303859 4753 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/8599dd71-db90-4431-a3b9-6a4dab2feb04-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 29 12:29:42 crc kubenswrapper[4753]: I0129 12:29:42.303870 4753 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/8599dd71-db90-4431-a3b9-6a4dab2feb04-dispersionconf\") on node \"crc\" DevicePath \"\"" Jan 29 12:29:42 crc kubenswrapper[4753]: I0129 12:29:42.303881 4753 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/8599dd71-db90-4431-a3b9-6a4dab2feb04-swiftconf\") on node \"crc\" DevicePath \"\"" Jan 29 12:29:42 crc kubenswrapper[4753]: I0129 12:29:42.304991 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c147b776-3de6-4314-a2b0-c2a8a639a0e8-scripts\") pod \"swift-ring-rebalance-debug-dkb5h\" (UID: \"c147b776-3de6-4314-a2b0-c2a8a639a0e8\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-dkb5h" Jan 29 12:29:42 crc kubenswrapper[4753]: I0129 12:29:42.305197 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/c147b776-3de6-4314-a2b0-c2a8a639a0e8-ring-data-devices\") pod \"swift-ring-rebalance-debug-dkb5h\" (UID: \"c147b776-3de6-4314-a2b0-c2a8a639a0e8\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-dkb5h" Jan 29 12:29:42 crc kubenswrapper[4753]: I0129 12:29:42.305369 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/c147b776-3de6-4314-a2b0-c2a8a639a0e8-etc-swift\") pod \"swift-ring-rebalance-debug-dkb5h\" (UID: \"c147b776-3de6-4314-a2b0-c2a8a639a0e8\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-dkb5h" Jan 29 12:29:42 crc kubenswrapper[4753]: I0129 12:29:42.308473 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"dispersionconf\" (UniqueName: \"kubernetes.io/secret/c147b776-3de6-4314-a2b0-c2a8a639a0e8-dispersionconf\") pod \"swift-ring-rebalance-debug-dkb5h\" (UID: \"c147b776-3de6-4314-a2b0-c2a8a639a0e8\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-dkb5h" Jan 29 12:29:42 crc kubenswrapper[4753]: I0129 12:29:42.310174 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/c147b776-3de6-4314-a2b0-c2a8a639a0e8-swiftconf\") pod \"swift-ring-rebalance-debug-dkb5h\" (UID: \"c147b776-3de6-4314-a2b0-c2a8a639a0e8\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-dkb5h" Jan 29 12:29:42 crc kubenswrapper[4753]: I0129 12:29:42.322861 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dg7bn\" (UniqueName: \"kubernetes.io/projected/c147b776-3de6-4314-a2b0-c2a8a639a0e8-kube-api-access-dg7bn\") pod \"swift-ring-rebalance-debug-dkb5h\" (UID: \"c147b776-3de6-4314-a2b0-c2a8a639a0e8\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-dkb5h" Jan 29 12:29:42 crc kubenswrapper[4753]: I0129 12:29:42.595320 4753 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a6f3a367cfacd3744a8d8566bc631bfe8713c36944d8c9072a8c1dcc7e4b9eae" Jan 29 12:29:42 crc kubenswrapper[4753]: I0129 12:29:42.595360 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-debug-vrdh8" Jan 29 12:29:42 crc kubenswrapper[4753]: I0129 12:29:42.596710 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-debug-dkb5h" Jan 29 12:29:43 crc kubenswrapper[4753]: I0129 12:29:43.343954 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-debug-dkb5h"] Jan 29 12:29:43 crc kubenswrapper[4753]: I0129 12:29:43.605213 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-ring-rebalance-debug-dkb5h" event={"ID":"c147b776-3de6-4314-a2b0-c2a8a639a0e8","Type":"ContainerStarted","Data":"2fad35715b86b9eae4b2cc0fbe7526b7d0ebdcb67b38c4796c612811ee2907ae"} Jan 29 12:29:43 crc kubenswrapper[4753]: I0129 12:29:43.605284 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-ring-rebalance-debug-dkb5h" event={"ID":"c147b776-3de6-4314-a2b0-c2a8a639a0e8","Type":"ContainerStarted","Data":"dbf2eaafe8ed9c53cda6a040a764fa354e5acc17578f43c871e8441fbf9c451c"} Jan 29 12:29:43 crc kubenswrapper[4753]: I0129 12:29:43.628716 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="swift-kuttl-tests/swift-ring-rebalance-debug-dkb5h" podStartSLOduration=1.628676658 podStartE2EDuration="1.628676658s" podCreationTimestamp="2026-01-29 12:29:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:29:43.624939972 +0000 UTC m=+1397.877021437" watchObservedRunningTime="2026-01-29 12:29:43.628676658 +0000 UTC m=+1397.880758113" Jan 29 12:29:43 crc kubenswrapper[4753]: I0129 12:29:43.898340 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8599dd71-db90-4431-a3b9-6a4dab2feb04" path="/var/lib/kubelet/pods/8599dd71-db90-4431-a3b9-6a4dab2feb04/volumes" Jan 29 12:29:45 crc kubenswrapper[4753]: I0129 12:29:45.630394 4753 generic.go:334] "Generic (PLEG): container finished" podID="c147b776-3de6-4314-a2b0-c2a8a639a0e8" 
containerID="2fad35715b86b9eae4b2cc0fbe7526b7d0ebdcb67b38c4796c612811ee2907ae" exitCode=0 Jan 29 12:29:45 crc kubenswrapper[4753]: I0129 12:29:45.630566 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-ring-rebalance-debug-dkb5h" event={"ID":"c147b776-3de6-4314-a2b0-c2a8a639a0e8","Type":"ContainerDied","Data":"2fad35715b86b9eae4b2cc0fbe7526b7d0ebdcb67b38c4796c612811ee2907ae"} Jan 29 12:29:46 crc kubenswrapper[4753]: I0129 12:29:46.990193 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-debug-dkb5h" Jan 29 12:29:47 crc kubenswrapper[4753]: I0129 12:29:47.024291 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-debug-dkb5h"] Jan 29 12:29:47 crc kubenswrapper[4753]: I0129 12:29:47.031047 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-debug-dkb5h"] Jan 29 12:29:47 crc kubenswrapper[4753]: I0129 12:29:47.151185 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c147b776-3de6-4314-a2b0-c2a8a639a0e8-scripts\") pod \"c147b776-3de6-4314-a2b0-c2a8a639a0e8\" (UID: \"c147b776-3de6-4314-a2b0-c2a8a639a0e8\") " Jan 29 12:29:47 crc kubenswrapper[4753]: I0129 12:29:47.151305 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/c147b776-3de6-4314-a2b0-c2a8a639a0e8-etc-swift\") pod \"c147b776-3de6-4314-a2b0-c2a8a639a0e8\" (UID: \"c147b776-3de6-4314-a2b0-c2a8a639a0e8\") " Jan 29 12:29:47 crc kubenswrapper[4753]: I0129 12:29:47.151366 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dg7bn\" (UniqueName: \"kubernetes.io/projected/c147b776-3de6-4314-a2b0-c2a8a639a0e8-kube-api-access-dg7bn\") pod \"c147b776-3de6-4314-a2b0-c2a8a639a0e8\" (UID: \"c147b776-3de6-4314-a2b0-c2a8a639a0e8\") " Jan 29 12:29:47 crc kubenswrapper[4753]: I0129 12:29:47.151417 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/c147b776-3de6-4314-a2b0-c2a8a639a0e8-ring-data-devices\") pod \"c147b776-3de6-4314-a2b0-c2a8a639a0e8\" (UID: \"c147b776-3de6-4314-a2b0-c2a8a639a0e8\") " Jan 29 12:29:47 crc kubenswrapper[4753]: I0129 12:29:47.151474 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/c147b776-3de6-4314-a2b0-c2a8a639a0e8-swiftconf\") pod \"c147b776-3de6-4314-a2b0-c2a8a639a0e8\" (UID: \"c147b776-3de6-4314-a2b0-c2a8a639a0e8\") " Jan 29 12:29:47 crc kubenswrapper[4753]: I0129 12:29:47.151517 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/c147b776-3de6-4314-a2b0-c2a8a639a0e8-dispersionconf\") pod \"c147b776-3de6-4314-a2b0-c2a8a639a0e8\" (UID: \"c147b776-3de6-4314-a2b0-c2a8a639a0e8\") " Jan 29 12:29:47 crc kubenswrapper[4753]: I0129 12:29:47.153864 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c147b776-3de6-4314-a2b0-c2a8a639a0e8-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "c147b776-3de6-4314-a2b0-c2a8a639a0e8" (UID: "c147b776-3de6-4314-a2b0-c2a8a639a0e8"). InnerVolumeSpecName "etc-swift". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:29:47 crc kubenswrapper[4753]: I0129 12:29:47.153992 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c147b776-3de6-4314-a2b0-c2a8a639a0e8-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "c147b776-3de6-4314-a2b0-c2a8a639a0e8" (UID: "c147b776-3de6-4314-a2b0-c2a8a639a0e8"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:29:47 crc kubenswrapper[4753]: I0129 12:29:47.157839 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c147b776-3de6-4314-a2b0-c2a8a639a0e8-kube-api-access-dg7bn" (OuterVolumeSpecName: "kube-api-access-dg7bn") pod "c147b776-3de6-4314-a2b0-c2a8a639a0e8" (UID: "c147b776-3de6-4314-a2b0-c2a8a639a0e8"). InnerVolumeSpecName "kube-api-access-dg7bn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:29:47 crc kubenswrapper[4753]: I0129 12:29:47.174058 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c147b776-3de6-4314-a2b0-c2a8a639a0e8-scripts" (OuterVolumeSpecName: "scripts") pod "c147b776-3de6-4314-a2b0-c2a8a639a0e8" (UID: "c147b776-3de6-4314-a2b0-c2a8a639a0e8"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:29:47 crc kubenswrapper[4753]: I0129 12:29:47.174847 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c147b776-3de6-4314-a2b0-c2a8a639a0e8-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "c147b776-3de6-4314-a2b0-c2a8a639a0e8" (UID: "c147b776-3de6-4314-a2b0-c2a8a639a0e8"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:29:47 crc kubenswrapper[4753]: I0129 12:29:47.184504 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c147b776-3de6-4314-a2b0-c2a8a639a0e8-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "c147b776-3de6-4314-a2b0-c2a8a639a0e8" (UID: "c147b776-3de6-4314-a2b0-c2a8a639a0e8"). InnerVolumeSpecName "dispersionconf". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:29:47 crc kubenswrapper[4753]: I0129 12:29:47.253241 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dg7bn\" (UniqueName: \"kubernetes.io/projected/c147b776-3de6-4314-a2b0-c2a8a639a0e8-kube-api-access-dg7bn\") on node \"crc\" DevicePath \"\"" Jan 29 12:29:47 crc kubenswrapper[4753]: I0129 12:29:47.253283 4753 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/c147b776-3de6-4314-a2b0-c2a8a639a0e8-ring-data-devices\") on node \"crc\" DevicePath \"\"" Jan 29 12:29:47 crc kubenswrapper[4753]: I0129 12:29:47.253294 4753 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/c147b776-3de6-4314-a2b0-c2a8a639a0e8-swiftconf\") on node \"crc\" DevicePath \"\"" Jan 29 12:29:47 crc kubenswrapper[4753]: I0129 12:29:47.253303 4753 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/c147b776-3de6-4314-a2b0-c2a8a639a0e8-dispersionconf\") on node \"crc\" DevicePath \"\"" Jan 29 12:29:47 crc kubenswrapper[4753]: I0129 12:29:47.253310 4753 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c147b776-3de6-4314-a2b0-c2a8a639a0e8-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:29:47 crc kubenswrapper[4753]: I0129 12:29:47.253319 4753 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/c147b776-3de6-4314-a2b0-c2a8a639a0e8-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 29 12:29:47 crc kubenswrapper[4753]: I0129 12:29:47.645719 4753 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dbf2eaafe8ed9c53cda6a040a764fa354e5acc17578f43c871e8441fbf9c451c" Jan 29 12:29:47 crc kubenswrapper[4753]: I0129 12:29:47.645768 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-debug-dkb5h" Jan 29 12:29:47 crc kubenswrapper[4753]: I0129 12:29:47.898621 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c147b776-3de6-4314-a2b0-c2a8a639a0e8" path="/var/lib/kubelet/pods/c147b776-3de6-4314-a2b0-c2a8a639a0e8/volumes" Jan 29 12:29:49 crc kubenswrapper[4753]: I0129 12:29:49.858021 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-debug-nvhsv"] Jan 29 12:29:49 crc kubenswrapper[4753]: E0129 12:29:49.858426 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c147b776-3de6-4314-a2b0-c2a8a639a0e8" containerName="swift-ring-rebalance" Jan 29 12:29:49 crc kubenswrapper[4753]: I0129 12:29:49.858442 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="c147b776-3de6-4314-a2b0-c2a8a639a0e8" containerName="swift-ring-rebalance" Jan 29 12:29:49 crc kubenswrapper[4753]: I0129 12:29:49.858611 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="c147b776-3de6-4314-a2b0-c2a8a639a0e8" containerName="swift-ring-rebalance" Jan 29 12:29:49 crc kubenswrapper[4753]: I0129 12:29:49.859245 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-debug-nvhsv" Jan 29 12:29:49 crc kubenswrapper[4753]: I0129 12:29:49.861998 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"swift-kuttl-tests"/"swift-ring-config-data" Jan 29 12:29:49 crc kubenswrapper[4753]: I0129 12:29:49.862093 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"swift-kuttl-tests"/"swift-ring-scripts" Jan 29 12:29:49 crc kubenswrapper[4753]: I0129 12:29:49.864695 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-debug-nvhsv"] Jan 29 12:29:49 crc kubenswrapper[4753]: I0129 12:29:49.982412 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/64a043e1-4572-4153-bc9f-83f458d2a980-etc-swift\") pod \"swift-ring-rebalance-debug-nvhsv\" (UID: \"64a043e1-4572-4153-bc9f-83f458d2a980\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-nvhsv" Jan 29 12:29:49 crc kubenswrapper[4753]: I0129 12:29:49.982493 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7mz94\" (UniqueName: \"kubernetes.io/projected/64a043e1-4572-4153-bc9f-83f458d2a980-kube-api-access-7mz94\") pod \"swift-ring-rebalance-debug-nvhsv\" (UID: \"64a043e1-4572-4153-bc9f-83f458d2a980\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-nvhsv" Jan 29 12:29:49 crc kubenswrapper[4753]: I0129 12:29:49.982553 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/64a043e1-4572-4153-bc9f-83f458d2a980-swiftconf\") pod \"swift-ring-rebalance-debug-nvhsv\" (UID: \"64a043e1-4572-4153-bc9f-83f458d2a980\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-nvhsv" Jan 29 12:29:49 crc kubenswrapper[4753]: I0129 12:29:49.982709 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/64a043e1-4572-4153-bc9f-83f458d2a980-scripts\") pod \"swift-ring-rebalance-debug-nvhsv\" (UID: \"64a043e1-4572-4153-bc9f-83f458d2a980\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-nvhsv" Jan 29 12:29:49 crc kubenswrapper[4753]: I0129 12:29:49.982744 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/64a043e1-4572-4153-bc9f-83f458d2a980-ring-data-devices\") pod \"swift-ring-rebalance-debug-nvhsv\" (UID: \"64a043e1-4572-4153-bc9f-83f458d2a980\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-nvhsv" Jan 29 12:29:49 crc kubenswrapper[4753]: I0129 12:29:49.982805 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/64a043e1-4572-4153-bc9f-83f458d2a980-dispersionconf\") pod \"swift-ring-rebalance-debug-nvhsv\" (UID: \"64a043e1-4572-4153-bc9f-83f458d2a980\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-nvhsv" Jan 29 12:29:50 crc kubenswrapper[4753]: I0129 12:29:50.098204 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/64a043e1-4572-4153-bc9f-83f458d2a980-scripts\") pod \"swift-ring-rebalance-debug-nvhsv\" (UID: \"64a043e1-4572-4153-bc9f-83f458d2a980\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-nvhsv" Jan 29 12:29:50 crc 
kubenswrapper[4753]: I0129 12:29:50.098570 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/64a043e1-4572-4153-bc9f-83f458d2a980-ring-data-devices\") pod \"swift-ring-rebalance-debug-nvhsv\" (UID: \"64a043e1-4572-4153-bc9f-83f458d2a980\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-nvhsv" Jan 29 12:29:50 crc kubenswrapper[4753]: I0129 12:29:50.098647 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/64a043e1-4572-4153-bc9f-83f458d2a980-dispersionconf\") pod \"swift-ring-rebalance-debug-nvhsv\" (UID: \"64a043e1-4572-4153-bc9f-83f458d2a980\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-nvhsv" Jan 29 12:29:50 crc kubenswrapper[4753]: I0129 12:29:50.098789 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/64a043e1-4572-4153-bc9f-83f458d2a980-etc-swift\") pod \"swift-ring-rebalance-debug-nvhsv\" (UID: \"64a043e1-4572-4153-bc9f-83f458d2a980\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-nvhsv" Jan 29 12:29:50 crc kubenswrapper[4753]: I0129 12:29:50.098818 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7mz94\" (UniqueName: \"kubernetes.io/projected/64a043e1-4572-4153-bc9f-83f458d2a980-kube-api-access-7mz94\") pod \"swift-ring-rebalance-debug-nvhsv\" (UID: \"64a043e1-4572-4153-bc9f-83f458d2a980\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-nvhsv" Jan 29 12:29:50 crc kubenswrapper[4753]: I0129 12:29:50.098870 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/64a043e1-4572-4153-bc9f-83f458d2a980-swiftconf\") pod \"swift-ring-rebalance-debug-nvhsv\" (UID: \"64a043e1-4572-4153-bc9f-83f458d2a980\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-nvhsv" Jan 29 12:29:50 crc kubenswrapper[4753]: I0129 12:29:50.099048 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/64a043e1-4572-4153-bc9f-83f458d2a980-scripts\") pod \"swift-ring-rebalance-debug-nvhsv\" (UID: \"64a043e1-4572-4153-bc9f-83f458d2a980\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-nvhsv" Jan 29 12:29:50 crc kubenswrapper[4753]: I0129 12:29:50.099592 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/64a043e1-4572-4153-bc9f-83f458d2a980-ring-data-devices\") pod \"swift-ring-rebalance-debug-nvhsv\" (UID: \"64a043e1-4572-4153-bc9f-83f458d2a980\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-nvhsv" Jan 29 12:29:50 crc kubenswrapper[4753]: I0129 12:29:50.099879 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/64a043e1-4572-4153-bc9f-83f458d2a980-etc-swift\") pod \"swift-ring-rebalance-debug-nvhsv\" (UID: \"64a043e1-4572-4153-bc9f-83f458d2a980\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-nvhsv" Jan 29 12:29:50 crc kubenswrapper[4753]: I0129 12:29:50.105142 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/64a043e1-4572-4153-bc9f-83f458d2a980-swiftconf\") pod \"swift-ring-rebalance-debug-nvhsv\" (UID: \"64a043e1-4572-4153-bc9f-83f458d2a980\") " 
pod="swift-kuttl-tests/swift-ring-rebalance-debug-nvhsv" Jan 29 12:29:50 crc kubenswrapper[4753]: I0129 12:29:50.114374 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/64a043e1-4572-4153-bc9f-83f458d2a980-dispersionconf\") pod \"swift-ring-rebalance-debug-nvhsv\" (UID: \"64a043e1-4572-4153-bc9f-83f458d2a980\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-nvhsv" Jan 29 12:29:50 crc kubenswrapper[4753]: I0129 12:29:50.115498 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7mz94\" (UniqueName: \"kubernetes.io/projected/64a043e1-4572-4153-bc9f-83f458d2a980-kube-api-access-7mz94\") pod \"swift-ring-rebalance-debug-nvhsv\" (UID: \"64a043e1-4572-4153-bc9f-83f458d2a980\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-nvhsv" Jan 29 12:29:50 crc kubenswrapper[4753]: I0129 12:29:50.177811 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-debug-nvhsv" Jan 29 12:29:50 crc kubenswrapper[4753]: I0129 12:29:50.597537 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-debug-nvhsv"] Jan 29 12:29:50 crc kubenswrapper[4753]: W0129 12:29:50.602157 4753 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod64a043e1_4572_4153_bc9f_83f458d2a980.slice/crio-2eb0f3e9bb98435d30398324ccc9de3027f8505783e7caa187428764d45a5d8b WatchSource:0}: Error finding container 2eb0f3e9bb98435d30398324ccc9de3027f8505783e7caa187428764d45a5d8b: Status 404 returned error can't find the container with id 2eb0f3e9bb98435d30398324ccc9de3027f8505783e7caa187428764d45a5d8b Jan 29 12:29:50 crc kubenswrapper[4753]: I0129 12:29:50.819786 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-ring-rebalance-debug-nvhsv" event={"ID":"64a043e1-4572-4153-bc9f-83f458d2a980","Type":"ContainerStarted","Data":"2eb0f3e9bb98435d30398324ccc9de3027f8505783e7caa187428764d45a5d8b"} Jan 29 12:29:51 crc kubenswrapper[4753]: I0129 12:29:51.829427 4753 generic.go:334] "Generic (PLEG): container finished" podID="64a043e1-4572-4153-bc9f-83f458d2a980" containerID="58deb117d5359924b3f99835fa5292fa3ff5c76eb5638e039f722f19b5d24784" exitCode=0 Jan 29 12:29:51 crc kubenswrapper[4753]: I0129 12:29:51.829579 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-ring-rebalance-debug-nvhsv" event={"ID":"64a043e1-4572-4153-bc9f-83f458d2a980","Type":"ContainerDied","Data":"58deb117d5359924b3f99835fa5292fa3ff5c76eb5638e039f722f19b5d24784"} Jan 29 12:29:51 crc kubenswrapper[4753]: I0129 12:29:51.898482 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-debug-nvhsv"] Jan 29 12:29:51 crc kubenswrapper[4753]: I0129 12:29:51.901904 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-debug-nvhsv"] Jan 29 12:29:52 crc kubenswrapper[4753]: I0129 12:29:52.211948 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-7494v"] Jan 29 12:29:52 crc kubenswrapper[4753]: I0129 12:29:52.218125 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-7494v"] Jan 29 12:29:52 crc kubenswrapper[4753]: I0129 12:29:52.252790 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/swift-storage-0"] Jan 29 12:29:52 crc 
kubenswrapper[4753]: I0129 12:29:52.254300 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="account-server" containerID="cri-o://d2af7af180031de8fdd0983850a21bbb0dc7bc730ec666598e572f49cb56c2a4" gracePeriod=30 Jan 29 12:29:52 crc kubenswrapper[4753]: I0129 12:29:52.331793 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="object-server" containerID="cri-o://e9fae1d0304d1a6d261a833051d3006fa143b16cd6d6e8cbeb4a066535097794" gracePeriod=30 Jan 29 12:29:52 crc kubenswrapper[4753]: I0129 12:29:52.332175 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="container-sharder" containerID="cri-o://bb19f71c8e939cc414644c25eaf5efc7de06e81b24e457c1ea9f7a902dcce561" gracePeriod=30 Jan 29 12:29:52 crc kubenswrapper[4753]: I0129 12:29:52.332349 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="swift-recon-cron" containerID="cri-o://7fcbcb12053c0515810e35d7051fa5695d83d9eb527057e515134b63851a5c9a" gracePeriod=30 Jan 29 12:29:52 crc kubenswrapper[4753]: I0129 12:29:52.332463 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="rsync" containerID="cri-o://40737a21840cfda7ec2c3c77af6e8921570fae2535ec52d19c97b4e32a6925e5" gracePeriod=30 Jan 29 12:29:52 crc kubenswrapper[4753]: I0129 12:29:52.332575 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="object-expirer" containerID="cri-o://5b83e1721f6dc650c1c22675c76d28414a7df673068b610229a08310233f3430" gracePeriod=30 Jan 29 12:29:52 crc kubenswrapper[4753]: I0129 12:29:52.332656 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="object-updater" containerID="cri-o://c3746f53cf45602790eb986ea13b559998358657e337ca9a64e63b7065410941" gracePeriod=30 Jan 29 12:29:52 crc kubenswrapper[4753]: I0129 12:29:52.332737 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="object-auditor" containerID="cri-o://e5782ff28242ca4b5c6270a37148829832413394f867d4c8a8b48f28cd77d95b" gracePeriod=30 Jan 29 12:29:52 crc kubenswrapper[4753]: I0129 12:29:52.332876 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="object-replicator" containerID="cri-o://83c3bedf95a58e8d14219b47ae5fb9e7f9e6c837be7170b58c5b00ab6bb13bd3" gracePeriod=30 Jan 29 12:29:52 crc kubenswrapper[4753]: I0129 12:29:52.333315 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="account-reaper" containerID="cri-o://cea3edd31c2b81739cf355a1b06782457367cb6df6f5563b24c73f09b71ddbf9" gracePeriod=30 Jan 29 
12:29:52 crc kubenswrapper[4753]: I0129 12:29:52.333423 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="container-updater" containerID="cri-o://aae9e4a99411d122b8ed7b019b9e7329b1b82581947c665733e25cb2ccf08256" gracePeriod=30 Jan 29 12:29:52 crc kubenswrapper[4753]: I0129 12:29:52.333500 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="container-auditor" containerID="cri-o://9bda9aa7a80e8ba90786ee356d5874c29c2779adfc8d54c3a2dccdc45759191f" gracePeriod=30 Jan 29 12:29:52 crc kubenswrapper[4753]: I0129 12:29:52.333588 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="container-replicator" containerID="cri-o://b31c0f4794d0d19b53ee6f57a63b973e041e1db6694a61130bdd7106c83c286a" gracePeriod=30 Jan 29 12:29:52 crc kubenswrapper[4753]: I0129 12:29:52.333665 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="container-server" containerID="cri-o://87187568015a0e156148a44f379441b4861da67970bece8b4474914aa31f9dba" gracePeriod=30 Jan 29 12:29:52 crc kubenswrapper[4753]: I0129 12:29:52.333806 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="account-replicator" containerID="cri-o://6fb444f8a28cd99977a4bf8425096045f77ba4946835c63cbdeadef200038963" gracePeriod=30 Jan 29 12:29:52 crc kubenswrapper[4753]: I0129 12:29:52.333918 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="account-auditor" containerID="cri-o://da235b77fd3100fc09167c2bfa1fe6dcd9d3796d72e3c97fa902c454142b0594" gracePeriod=30 Jan 29 12:29:52 crc kubenswrapper[4753]: I0129 12:29:52.569246 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/swift-proxy-7769498bdc-462lm"] Jan 29 12:29:52 crc kubenswrapper[4753]: I0129 12:29:52.569536 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-proxy-7769498bdc-462lm" podUID="9a39086c-d994-44cb-a300-ee10175b234e" containerName="proxy-httpd" containerID="cri-o://70c7c82643874e91a115ede43c1fc951dbdee8c165507e3453504fe71e2b17bb" gracePeriod=30 Jan 29 12:29:52 crc kubenswrapper[4753]: I0129 12:29:52.569671 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-proxy-7769498bdc-462lm" podUID="9a39086c-d994-44cb-a300-ee10175b234e" containerName="proxy-server" containerID="cri-o://2297f3f32e7f61f7be1ba11eb96ea1b22f792effd89eaabd82a9e04ea69b8093" gracePeriod=30 Jan 29 12:29:52 crc kubenswrapper[4753]: I0129 12:29:52.884529 4753 generic.go:334] "Generic (PLEG): container finished" podID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerID="bb19f71c8e939cc414644c25eaf5efc7de06e81b24e457c1ea9f7a902dcce561" exitCode=0 Jan 29 12:29:52 crc kubenswrapper[4753]: I0129 12:29:52.884873 4753 generic.go:334] "Generic (PLEG): container finished" podID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" 
containerID="5b83e1721f6dc650c1c22675c76d28414a7df673068b610229a08310233f3430" exitCode=0 Jan 29 12:29:52 crc kubenswrapper[4753]: I0129 12:29:52.884884 4753 generic.go:334] "Generic (PLEG): container finished" podID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerID="c3746f53cf45602790eb986ea13b559998358657e337ca9a64e63b7065410941" exitCode=0 Jan 29 12:29:52 crc kubenswrapper[4753]: I0129 12:29:52.884890 4753 generic.go:334] "Generic (PLEG): container finished" podID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerID="e5782ff28242ca4b5c6270a37148829832413394f867d4c8a8b48f28cd77d95b" exitCode=0 Jan 29 12:29:52 crc kubenswrapper[4753]: I0129 12:29:52.884903 4753 generic.go:334] "Generic (PLEG): container finished" podID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerID="83c3bedf95a58e8d14219b47ae5fb9e7f9e6c837be7170b58c5b00ab6bb13bd3" exitCode=0 Jan 29 12:29:52 crc kubenswrapper[4753]: I0129 12:29:52.884911 4753 generic.go:334] "Generic (PLEG): container finished" podID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerID="aae9e4a99411d122b8ed7b019b9e7329b1b82581947c665733e25cb2ccf08256" exitCode=0 Jan 29 12:29:52 crc kubenswrapper[4753]: I0129 12:29:52.884917 4753 generic.go:334] "Generic (PLEG): container finished" podID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerID="9bda9aa7a80e8ba90786ee356d5874c29c2779adfc8d54c3a2dccdc45759191f" exitCode=0 Jan 29 12:29:52 crc kubenswrapper[4753]: I0129 12:29:52.884926 4753 generic.go:334] "Generic (PLEG): container finished" podID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerID="b31c0f4794d0d19b53ee6f57a63b973e041e1db6694a61130bdd7106c83c286a" exitCode=0 Jan 29 12:29:52 crc kubenswrapper[4753]: I0129 12:29:52.884932 4753 generic.go:334] "Generic (PLEG): container finished" podID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerID="cea3edd31c2b81739cf355a1b06782457367cb6df6f5563b24c73f09b71ddbf9" exitCode=0 Jan 29 12:29:52 crc kubenswrapper[4753]: I0129 12:29:52.884938 4753 generic.go:334] "Generic (PLEG): container finished" podID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerID="da235b77fd3100fc09167c2bfa1fe6dcd9d3796d72e3c97fa902c454142b0594" exitCode=0 Jan 29 12:29:52 crc kubenswrapper[4753]: I0129 12:29:52.884944 4753 generic.go:334] "Generic (PLEG): container finished" podID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerID="6fb444f8a28cd99977a4bf8425096045f77ba4946835c63cbdeadef200038963" exitCode=0 Jan 29 12:29:52 crc kubenswrapper[4753]: I0129 12:29:52.884777 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"138a8739-c950-4fe4-9aaa-e232bdea3f7b","Type":"ContainerDied","Data":"bb19f71c8e939cc414644c25eaf5efc7de06e81b24e457c1ea9f7a902dcce561"} Jan 29 12:29:52 crc kubenswrapper[4753]: I0129 12:29:52.885174 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"138a8739-c950-4fe4-9aaa-e232bdea3f7b","Type":"ContainerDied","Data":"5b83e1721f6dc650c1c22675c76d28414a7df673068b610229a08310233f3430"} Jan 29 12:29:52 crc kubenswrapper[4753]: I0129 12:29:52.885191 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"138a8739-c950-4fe4-9aaa-e232bdea3f7b","Type":"ContainerDied","Data":"c3746f53cf45602790eb986ea13b559998358657e337ca9a64e63b7065410941"} Jan 29 12:29:52 crc kubenswrapper[4753]: I0129 12:29:52.885201 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" 
event={"ID":"138a8739-c950-4fe4-9aaa-e232bdea3f7b","Type":"ContainerDied","Data":"e5782ff28242ca4b5c6270a37148829832413394f867d4c8a8b48f28cd77d95b"} Jan 29 12:29:52 crc kubenswrapper[4753]: I0129 12:29:52.885215 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"138a8739-c950-4fe4-9aaa-e232bdea3f7b","Type":"ContainerDied","Data":"83c3bedf95a58e8d14219b47ae5fb9e7f9e6c837be7170b58c5b00ab6bb13bd3"} Jan 29 12:29:52 crc kubenswrapper[4753]: I0129 12:29:52.885247 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"138a8739-c950-4fe4-9aaa-e232bdea3f7b","Type":"ContainerDied","Data":"aae9e4a99411d122b8ed7b019b9e7329b1b82581947c665733e25cb2ccf08256"} Jan 29 12:29:52 crc kubenswrapper[4753]: I0129 12:29:52.885257 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"138a8739-c950-4fe4-9aaa-e232bdea3f7b","Type":"ContainerDied","Data":"9bda9aa7a80e8ba90786ee356d5874c29c2779adfc8d54c3a2dccdc45759191f"} Jan 29 12:29:52 crc kubenswrapper[4753]: I0129 12:29:52.885265 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"138a8739-c950-4fe4-9aaa-e232bdea3f7b","Type":"ContainerDied","Data":"b31c0f4794d0d19b53ee6f57a63b973e041e1db6694a61130bdd7106c83c286a"} Jan 29 12:29:52 crc kubenswrapper[4753]: I0129 12:29:52.885273 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"138a8739-c950-4fe4-9aaa-e232bdea3f7b","Type":"ContainerDied","Data":"cea3edd31c2b81739cf355a1b06782457367cb6df6f5563b24c73f09b71ddbf9"} Jan 29 12:29:52 crc kubenswrapper[4753]: I0129 12:29:52.885292 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"138a8739-c950-4fe4-9aaa-e232bdea3f7b","Type":"ContainerDied","Data":"da235b77fd3100fc09167c2bfa1fe6dcd9d3796d72e3c97fa902c454142b0594"} Jan 29 12:29:52 crc kubenswrapper[4753]: I0129 12:29:52.885301 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"138a8739-c950-4fe4-9aaa-e232bdea3f7b","Type":"ContainerDied","Data":"6fb444f8a28cd99977a4bf8425096045f77ba4946835c63cbdeadef200038963"} Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.391731 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-debug-nvhsv" Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.450106 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/64a043e1-4572-4153-bc9f-83f458d2a980-scripts\") pod \"64a043e1-4572-4153-bc9f-83f458d2a980\" (UID: \"64a043e1-4572-4153-bc9f-83f458d2a980\") " Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.450246 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/64a043e1-4572-4153-bc9f-83f458d2a980-swiftconf\") pod \"64a043e1-4572-4153-bc9f-83f458d2a980\" (UID: \"64a043e1-4572-4153-bc9f-83f458d2a980\") " Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.450300 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/64a043e1-4572-4153-bc9f-83f458d2a980-etc-swift\") pod \"64a043e1-4572-4153-bc9f-83f458d2a980\" (UID: \"64a043e1-4572-4153-bc9f-83f458d2a980\") " Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.450333 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/64a043e1-4572-4153-bc9f-83f458d2a980-dispersionconf\") pod \"64a043e1-4572-4153-bc9f-83f458d2a980\" (UID: \"64a043e1-4572-4153-bc9f-83f458d2a980\") " Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.450400 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7mz94\" (UniqueName: \"kubernetes.io/projected/64a043e1-4572-4153-bc9f-83f458d2a980-kube-api-access-7mz94\") pod \"64a043e1-4572-4153-bc9f-83f458d2a980\" (UID: \"64a043e1-4572-4153-bc9f-83f458d2a980\") " Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.450450 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/64a043e1-4572-4153-bc9f-83f458d2a980-ring-data-devices\") pod \"64a043e1-4572-4153-bc9f-83f458d2a980\" (UID: \"64a043e1-4572-4153-bc9f-83f458d2a980\") " Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.451613 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/64a043e1-4572-4153-bc9f-83f458d2a980-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "64a043e1-4572-4153-bc9f-83f458d2a980" (UID: "64a043e1-4572-4153-bc9f-83f458d2a980"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.451618 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/64a043e1-4572-4153-bc9f-83f458d2a980-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "64a043e1-4572-4153-bc9f-83f458d2a980" (UID: "64a043e1-4572-4153-bc9f-83f458d2a980"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.459484 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/64a043e1-4572-4153-bc9f-83f458d2a980-kube-api-access-7mz94" (OuterVolumeSpecName: "kube-api-access-7mz94") pod "64a043e1-4572-4153-bc9f-83f458d2a980" (UID: "64a043e1-4572-4153-bc9f-83f458d2a980"). InnerVolumeSpecName "kube-api-access-7mz94". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.478699 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/64a043e1-4572-4153-bc9f-83f458d2a980-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "64a043e1-4572-4153-bc9f-83f458d2a980" (UID: "64a043e1-4572-4153-bc9f-83f458d2a980"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.479494 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/64a043e1-4572-4153-bc9f-83f458d2a980-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "64a043e1-4572-4153-bc9f-83f458d2a980" (UID: "64a043e1-4572-4153-bc9f-83f458d2a980"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.485644 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/64a043e1-4572-4153-bc9f-83f458d2a980-scripts" (OuterVolumeSpecName: "scripts") pod "64a043e1-4572-4153-bc9f-83f458d2a980" (UID: "64a043e1-4572-4153-bc9f-83f458d2a980"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.552597 4753 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/64a043e1-4572-4153-bc9f-83f458d2a980-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.552941 4753 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/64a043e1-4572-4153-bc9f-83f458d2a980-swiftconf\") on node \"crc\" DevicePath \"\"" Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.552954 4753 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/64a043e1-4572-4153-bc9f-83f458d2a980-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.552966 4753 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/64a043e1-4572-4153-bc9f-83f458d2a980-dispersionconf\") on node \"crc\" DevicePath \"\"" Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.552981 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7mz94\" (UniqueName: \"kubernetes.io/projected/64a043e1-4572-4153-bc9f-83f458d2a980-kube-api-access-7mz94\") on node \"crc\" DevicePath \"\"" Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.552994 4753 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/64a043e1-4572-4153-bc9f-83f458d2a980-ring-data-devices\") on node \"crc\" DevicePath \"\"" Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.656830 4753 util.go:48] "No ready sandbox for pod can be found. 
Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.656830 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-proxy-7769498bdc-462lm"
Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.755917 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9a39086c-d994-44cb-a300-ee10175b234e-run-httpd\") pod \"9a39086c-d994-44cb-a300-ee10175b234e\" (UID: \"9a39086c-d994-44cb-a300-ee10175b234e\") "
Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.755989 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9a39086c-d994-44cb-a300-ee10175b234e-log-httpd\") pod \"9a39086c-d994-44cb-a300-ee10175b234e\" (UID: \"9a39086c-d994-44cb-a300-ee10175b234e\") "
Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.756127 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/9a39086c-d994-44cb-a300-ee10175b234e-etc-swift\") pod \"9a39086c-d994-44cb-a300-ee10175b234e\" (UID: \"9a39086c-d994-44cb-a300-ee10175b234e\") "
Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.756157 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9a39086c-d994-44cb-a300-ee10175b234e-config-data\") pod \"9a39086c-d994-44cb-a300-ee10175b234e\" (UID: \"9a39086c-d994-44cb-a300-ee10175b234e\") "
Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.756181 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gqj2k\" (UniqueName: \"kubernetes.io/projected/9a39086c-d994-44cb-a300-ee10175b234e-kube-api-access-gqj2k\") pod \"9a39086c-d994-44cb-a300-ee10175b234e\" (UID: \"9a39086c-d994-44cb-a300-ee10175b234e\") "
Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.756244 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9a39086c-d994-44cb-a300-ee10175b234e-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "9a39086c-d994-44cb-a300-ee10175b234e" (UID: "9a39086c-d994-44cb-a300-ee10175b234e"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.756465 4753 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9a39086c-d994-44cb-a300-ee10175b234e-run-httpd\") on node \"crc\" DevicePath \"\""
Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.756920 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9a39086c-d994-44cb-a300-ee10175b234e-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "9a39086c-d994-44cb-a300-ee10175b234e" (UID: "9a39086c-d994-44cb-a300-ee10175b234e"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.759950 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9a39086c-d994-44cb-a300-ee10175b234e-kube-api-access-gqj2k" (OuterVolumeSpecName: "kube-api-access-gqj2k") pod "9a39086c-d994-44cb-a300-ee10175b234e" (UID: "9a39086c-d994-44cb-a300-ee10175b234e"). InnerVolumeSpecName "kube-api-access-gqj2k". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.770422 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9a39086c-d994-44cb-a300-ee10175b234e-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "9a39086c-d994-44cb-a300-ee10175b234e" (UID: "9a39086c-d994-44cb-a300-ee10175b234e"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.795855 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9a39086c-d994-44cb-a300-ee10175b234e-config-data" (OuterVolumeSpecName: "config-data") pod "9a39086c-d994-44cb-a300-ee10175b234e" (UID: "9a39086c-d994-44cb-a300-ee10175b234e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.858198 4753 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/9a39086c-d994-44cb-a300-ee10175b234e-etc-swift\") on node \"crc\" DevicePath \"\""
Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.858278 4753 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9a39086c-d994-44cb-a300-ee10175b234e-config-data\") on node \"crc\" DevicePath \"\""
Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.858296 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gqj2k\" (UniqueName: \"kubernetes.io/projected/9a39086c-d994-44cb-a300-ee10175b234e-kube-api-access-gqj2k\") on node \"crc\" DevicePath \"\""
Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.858312 4753 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9a39086c-d994-44cb-a300-ee10175b234e-log-httpd\") on node \"crc\" DevicePath \"\""
Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.897084 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2b27a01f-bed5-4012-ab93-63e3095aecad" path="/var/lib/kubelet/pods/2b27a01f-bed5-4012-ab93-63e3095aecad/volumes"
Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.897692 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="64a043e1-4572-4153-bc9f-83f458d2a980" path="/var/lib/kubelet/pods/64a043e1-4572-4153-bc9f-83f458d2a980/volumes"
Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.900451 4753 generic.go:334] "Generic (PLEG): container finished" podID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerID="40737a21840cfda7ec2c3c77af6e8921570fae2535ec52d19c97b4e32a6925e5" exitCode=0
Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.900550 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"138a8739-c950-4fe4-9aaa-e232bdea3f7b","Type":"ContainerDied","Data":"40737a21840cfda7ec2c3c77af6e8921570fae2535ec52d19c97b4e32a6925e5"}
Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.900629 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"138a8739-c950-4fe4-9aaa-e232bdea3f7b","Type":"ContainerDied","Data":"e9fae1d0304d1a6d261a833051d3006fa143b16cd6d6e8cbeb4a066535097794"}
Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.900567 4753 generic.go:334] "Generic (PLEG): container finished" podID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" 
containerID="e9fae1d0304d1a6d261a833051d3006fa143b16cd6d6e8cbeb4a066535097794" exitCode=0 Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.900660 4753 generic.go:334] "Generic (PLEG): container finished" podID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerID="87187568015a0e156148a44f379441b4861da67970bece8b4474914aa31f9dba" exitCode=0 Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.900675 4753 generic.go:334] "Generic (PLEG): container finished" podID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerID="d2af7af180031de8fdd0983850a21bbb0dc7bc730ec666598e572f49cb56c2a4" exitCode=0 Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.900792 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"138a8739-c950-4fe4-9aaa-e232bdea3f7b","Type":"ContainerDied","Data":"87187568015a0e156148a44f379441b4861da67970bece8b4474914aa31f9dba"} Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.900896 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"138a8739-c950-4fe4-9aaa-e232bdea3f7b","Type":"ContainerDied","Data":"d2af7af180031de8fdd0983850a21bbb0dc7bc730ec666598e572f49cb56c2a4"} Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.902598 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-debug-nvhsv" Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.902609 4753 scope.go:117] "RemoveContainer" containerID="58deb117d5359924b3f99835fa5292fa3ff5c76eb5638e039f722f19b5d24784" Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.904495 4753 generic.go:334] "Generic (PLEG): container finished" podID="9a39086c-d994-44cb-a300-ee10175b234e" containerID="2297f3f32e7f61f7be1ba11eb96ea1b22f792effd89eaabd82a9e04ea69b8093" exitCode=0 Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.904609 4753 generic.go:334] "Generic (PLEG): container finished" podID="9a39086c-d994-44cb-a300-ee10175b234e" containerID="70c7c82643874e91a115ede43c1fc951dbdee8c165507e3453504fe71e2b17bb" exitCode=0 Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.904577 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7769498bdc-462lm" event={"ID":"9a39086c-d994-44cb-a300-ee10175b234e","Type":"ContainerDied","Data":"2297f3f32e7f61f7be1ba11eb96ea1b22f792effd89eaabd82a9e04ea69b8093"} Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.904767 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7769498bdc-462lm" event={"ID":"9a39086c-d994-44cb-a300-ee10175b234e","Type":"ContainerDied","Data":"70c7c82643874e91a115ede43c1fc951dbdee8c165507e3453504fe71e2b17bb"} Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.904851 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7769498bdc-462lm" event={"ID":"9a39086c-d994-44cb-a300-ee10175b234e","Type":"ContainerDied","Data":"308bfb203819c7445a07b64d248d4e477e48663108d8025ef429e0f8daede5f0"} Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.904559 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/swift-proxy-7769498bdc-462lm" Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.928642 4753 scope.go:117] "RemoveContainer" containerID="2297f3f32e7f61f7be1ba11eb96ea1b22f792effd89eaabd82a9e04ea69b8093" Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.947854 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/swift-proxy-7769498bdc-462lm"] Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.951959 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/swift-proxy-7769498bdc-462lm"] Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.953173 4753 scope.go:117] "RemoveContainer" containerID="70c7c82643874e91a115ede43c1fc951dbdee8c165507e3453504fe71e2b17bb" Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.973425 4753 scope.go:117] "RemoveContainer" containerID="2297f3f32e7f61f7be1ba11eb96ea1b22f792effd89eaabd82a9e04ea69b8093" Jan 29 12:29:53 crc kubenswrapper[4753]: E0129 12:29:53.974463 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2297f3f32e7f61f7be1ba11eb96ea1b22f792effd89eaabd82a9e04ea69b8093\": container with ID starting with 2297f3f32e7f61f7be1ba11eb96ea1b22f792effd89eaabd82a9e04ea69b8093 not found: ID does not exist" containerID="2297f3f32e7f61f7be1ba11eb96ea1b22f792effd89eaabd82a9e04ea69b8093" Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.974509 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2297f3f32e7f61f7be1ba11eb96ea1b22f792effd89eaabd82a9e04ea69b8093"} err="failed to get container status \"2297f3f32e7f61f7be1ba11eb96ea1b22f792effd89eaabd82a9e04ea69b8093\": rpc error: code = NotFound desc = could not find container \"2297f3f32e7f61f7be1ba11eb96ea1b22f792effd89eaabd82a9e04ea69b8093\": container with ID starting with 2297f3f32e7f61f7be1ba11eb96ea1b22f792effd89eaabd82a9e04ea69b8093 not found: ID does not exist" Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.974539 4753 scope.go:117] "RemoveContainer" containerID="70c7c82643874e91a115ede43c1fc951dbdee8c165507e3453504fe71e2b17bb" Jan 29 12:29:53 crc kubenswrapper[4753]: E0129 12:29:53.974838 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"70c7c82643874e91a115ede43c1fc951dbdee8c165507e3453504fe71e2b17bb\": container with ID starting with 70c7c82643874e91a115ede43c1fc951dbdee8c165507e3453504fe71e2b17bb not found: ID does not exist" containerID="70c7c82643874e91a115ede43c1fc951dbdee8c165507e3453504fe71e2b17bb" Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.974970 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"70c7c82643874e91a115ede43c1fc951dbdee8c165507e3453504fe71e2b17bb"} err="failed to get container status \"70c7c82643874e91a115ede43c1fc951dbdee8c165507e3453504fe71e2b17bb\": rpc error: code = NotFound desc = could not find container \"70c7c82643874e91a115ede43c1fc951dbdee8c165507e3453504fe71e2b17bb\": container with ID starting with 70c7c82643874e91a115ede43c1fc951dbdee8c165507e3453504fe71e2b17bb not found: ID does not exist" Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.974992 4753 scope.go:117] "RemoveContainer" containerID="2297f3f32e7f61f7be1ba11eb96ea1b22f792effd89eaabd82a9e04ea69b8093" Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.976536 4753 pod_container_deletor.go:53] "DeleteContainer 
returned error" containerID={"Type":"cri-o","ID":"2297f3f32e7f61f7be1ba11eb96ea1b22f792effd89eaabd82a9e04ea69b8093"} err="failed to get container status \"2297f3f32e7f61f7be1ba11eb96ea1b22f792effd89eaabd82a9e04ea69b8093\": rpc error: code = NotFound desc = could not find container \"2297f3f32e7f61f7be1ba11eb96ea1b22f792effd89eaabd82a9e04ea69b8093\": container with ID starting with 2297f3f32e7f61f7be1ba11eb96ea1b22f792effd89eaabd82a9e04ea69b8093 not found: ID does not exist" Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.976606 4753 scope.go:117] "RemoveContainer" containerID="70c7c82643874e91a115ede43c1fc951dbdee8c165507e3453504fe71e2b17bb" Jan 29 12:29:53 crc kubenswrapper[4753]: I0129 12:29:53.977122 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"70c7c82643874e91a115ede43c1fc951dbdee8c165507e3453504fe71e2b17bb"} err="failed to get container status \"70c7c82643874e91a115ede43c1fc951dbdee8c165507e3453504fe71e2b17bb\": rpc error: code = NotFound desc = could not find container \"70c7c82643874e91a115ede43c1fc951dbdee8c165507e3453504fe71e2b17bb\": container with ID starting with 70c7c82643874e91a115ede43c1fc951dbdee8c165507e3453504fe71e2b17bb not found: ID does not exist" Jan 29 12:29:55 crc kubenswrapper[4753]: I0129 12:29:55.900714 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9a39086c-d994-44cb-a300-ee10175b234e" path="/var/lib/kubelet/pods/9a39086c-d994-44cb-a300-ee10175b234e/volumes" Jan 29 12:29:59 crc kubenswrapper[4753]: I0129 12:29:59.252868 4753 patch_prober.go:28] interesting pod/machine-config-daemon-7c24x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 12:29:59 crc kubenswrapper[4753]: I0129 12:29:59.254346 4753 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 12:29:59 crc kubenswrapper[4753]: I0129 12:29:59.254494 4753 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" Jan 29 12:29:59 crc kubenswrapper[4753]: I0129 12:29:59.255342 4753 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"46e4d0e61ffa31047d03b8be433c21bd5af5bab8ccbf094d8375450be774834e"} pod="openshift-machine-config-operator/machine-config-daemon-7c24x" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 12:29:59 crc kubenswrapper[4753]: I0129 12:29:59.255497 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" containerName="machine-config-daemon" containerID="cri-o://46e4d0e61ffa31047d03b8be433c21bd5af5bab8ccbf094d8375450be774834e" gracePeriod=600 Jan 29 12:30:00 crc kubenswrapper[4753]: I0129 12:30:00.142584 4753 generic.go:334] "Generic (PLEG): container finished" podID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" containerID="46e4d0e61ffa31047d03b8be433c21bd5af5bab8ccbf094d8375450be774834e" exitCode=0 Jan 29 12:30:00 
crc kubenswrapper[4753]: I0129 12:30:00.142649 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" event={"ID":"b0310995-a7c7-47c3-ae6c-05daaaba92a6","Type":"ContainerDied","Data":"46e4d0e61ffa31047d03b8be433c21bd5af5bab8ccbf094d8375450be774834e"} Jan 29 12:30:00 crc kubenswrapper[4753]: I0129 12:30:00.143192 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" event={"ID":"b0310995-a7c7-47c3-ae6c-05daaaba92a6","Type":"ContainerStarted","Data":"00211d2562b6632acb04decda9045cbb8d3216adca69cd5bc1380ec42b8211e0"} Jan 29 12:30:00 crc kubenswrapper[4753]: I0129 12:30:00.143247 4753 scope.go:117] "RemoveContainer" containerID="5697d146fc311e04cb43bd311c6234230b3e6c0088cc915fea2e2ab2972df9e8" Jan 29 12:30:00 crc kubenswrapper[4753]: I0129 12:30:00.146048 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494830-dqg2p"] Jan 29 12:30:00 crc kubenswrapper[4753]: E0129 12:30:00.146392 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a39086c-d994-44cb-a300-ee10175b234e" containerName="proxy-server" Jan 29 12:30:00 crc kubenswrapper[4753]: I0129 12:30:00.146414 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a39086c-d994-44cb-a300-ee10175b234e" containerName="proxy-server" Jan 29 12:30:00 crc kubenswrapper[4753]: E0129 12:30:00.146429 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a39086c-d994-44cb-a300-ee10175b234e" containerName="proxy-httpd" Jan 29 12:30:00 crc kubenswrapper[4753]: I0129 12:30:00.146435 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a39086c-d994-44cb-a300-ee10175b234e" containerName="proxy-httpd" Jan 29 12:30:00 crc kubenswrapper[4753]: E0129 12:30:00.146459 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64a043e1-4572-4153-bc9f-83f458d2a980" containerName="swift-ring-rebalance" Jan 29 12:30:00 crc kubenswrapper[4753]: I0129 12:30:00.146466 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="64a043e1-4572-4153-bc9f-83f458d2a980" containerName="swift-ring-rebalance" Jan 29 12:30:00 crc kubenswrapper[4753]: I0129 12:30:00.146595 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="9a39086c-d994-44cb-a300-ee10175b234e" containerName="proxy-httpd" Jan 29 12:30:00 crc kubenswrapper[4753]: I0129 12:30:00.146608 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="9a39086c-d994-44cb-a300-ee10175b234e" containerName="proxy-server" Jan 29 12:30:00 crc kubenswrapper[4753]: I0129 12:30:00.146628 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="64a043e1-4572-4153-bc9f-83f458d2a980" containerName="swift-ring-rebalance" Jan 29 12:30:00 crc kubenswrapper[4753]: I0129 12:30:00.147800 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494830-dqg2p" Jan 29 12:30:00 crc kubenswrapper[4753]: I0129 12:30:00.150462 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 29 12:30:00 crc kubenswrapper[4753]: I0129 12:30:00.150754 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 29 12:30:00 crc kubenswrapper[4753]: I0129 12:30:00.169626 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494830-dqg2p"] Jan 29 12:30:00 crc kubenswrapper[4753]: I0129 12:30:00.288687 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/75bb4a41-cec1-4808-9d05-35ea682d4d35-secret-volume\") pod \"collect-profiles-29494830-dqg2p\" (UID: \"75bb4a41-cec1-4808-9d05-35ea682d4d35\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494830-dqg2p" Jan 29 12:30:00 crc kubenswrapper[4753]: I0129 12:30:00.288790 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/75bb4a41-cec1-4808-9d05-35ea682d4d35-config-volume\") pod \"collect-profiles-29494830-dqg2p\" (UID: \"75bb4a41-cec1-4808-9d05-35ea682d4d35\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494830-dqg2p" Jan 29 12:30:00 crc kubenswrapper[4753]: I0129 12:30:00.288892 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6gk62\" (UniqueName: \"kubernetes.io/projected/75bb4a41-cec1-4808-9d05-35ea682d4d35-kube-api-access-6gk62\") pod \"collect-profiles-29494830-dqg2p\" (UID: \"75bb4a41-cec1-4808-9d05-35ea682d4d35\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494830-dqg2p" Jan 29 12:30:00 crc kubenswrapper[4753]: I0129 12:30:00.390966 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6gk62\" (UniqueName: \"kubernetes.io/projected/75bb4a41-cec1-4808-9d05-35ea682d4d35-kube-api-access-6gk62\") pod \"collect-profiles-29494830-dqg2p\" (UID: \"75bb4a41-cec1-4808-9d05-35ea682d4d35\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494830-dqg2p" Jan 29 12:30:00 crc kubenswrapper[4753]: I0129 12:30:00.391074 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/75bb4a41-cec1-4808-9d05-35ea682d4d35-secret-volume\") pod \"collect-profiles-29494830-dqg2p\" (UID: \"75bb4a41-cec1-4808-9d05-35ea682d4d35\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494830-dqg2p" Jan 29 12:30:00 crc kubenswrapper[4753]: I0129 12:30:00.391128 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/75bb4a41-cec1-4808-9d05-35ea682d4d35-config-volume\") pod \"collect-profiles-29494830-dqg2p\" (UID: \"75bb4a41-cec1-4808-9d05-35ea682d4d35\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494830-dqg2p" Jan 29 12:30:00 crc kubenswrapper[4753]: I0129 12:30:00.392044 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/75bb4a41-cec1-4808-9d05-35ea682d4d35-config-volume\") pod 
\"collect-profiles-29494830-dqg2p\" (UID: \"75bb4a41-cec1-4808-9d05-35ea682d4d35\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494830-dqg2p" Jan 29 12:30:00 crc kubenswrapper[4753]: I0129 12:30:00.399983 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/75bb4a41-cec1-4808-9d05-35ea682d4d35-secret-volume\") pod \"collect-profiles-29494830-dqg2p\" (UID: \"75bb4a41-cec1-4808-9d05-35ea682d4d35\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494830-dqg2p" Jan 29 12:30:00 crc kubenswrapper[4753]: I0129 12:30:00.410911 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6gk62\" (UniqueName: \"kubernetes.io/projected/75bb4a41-cec1-4808-9d05-35ea682d4d35-kube-api-access-6gk62\") pod \"collect-profiles-29494830-dqg2p\" (UID: \"75bb4a41-cec1-4808-9d05-35ea682d4d35\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494830-dqg2p" Jan 29 12:30:00 crc kubenswrapper[4753]: I0129 12:30:00.467876 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494830-dqg2p" Jan 29 12:30:00 crc kubenswrapper[4753]: I0129 12:30:00.904918 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494830-dqg2p"] Jan 29 12:30:01 crc kubenswrapper[4753]: I0129 12:30:01.152217 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494830-dqg2p" event={"ID":"75bb4a41-cec1-4808-9d05-35ea682d4d35","Type":"ContainerStarted","Data":"46d40dae8655d467223102f8d240802aacb84dece2d7688009382b8035ba6083"} Jan 29 12:30:01 crc kubenswrapper[4753]: I0129 12:30:01.152309 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494830-dqg2p" event={"ID":"75bb4a41-cec1-4808-9d05-35ea682d4d35","Type":"ContainerStarted","Data":"7941cf209cb6791bb54e52d87feca0aadc3231109707f8ae38c10176a0a2245c"} Jan 29 12:30:01 crc kubenswrapper[4753]: I0129 12:30:01.174153 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29494830-dqg2p" podStartSLOduration=1.174124034 podStartE2EDuration="1.174124034s" podCreationTimestamp="2026-01-29 12:30:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:30:01.171789998 +0000 UTC m=+1415.423871453" watchObservedRunningTime="2026-01-29 12:30:01.174124034 +0000 UTC m=+1415.426205509" Jan 29 12:30:02 crc kubenswrapper[4753]: I0129 12:30:02.164796 4753 generic.go:334] "Generic (PLEG): container finished" podID="75bb4a41-cec1-4808-9d05-35ea682d4d35" containerID="46d40dae8655d467223102f8d240802aacb84dece2d7688009382b8035ba6083" exitCode=0 Jan 29 12:30:02 crc kubenswrapper[4753]: I0129 12:30:02.164852 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494830-dqg2p" event={"ID":"75bb4a41-cec1-4808-9d05-35ea682d4d35","Type":"ContainerDied","Data":"46d40dae8655d467223102f8d240802aacb84dece2d7688009382b8035ba6083"} Jan 29 12:30:03 crc kubenswrapper[4753]: I0129 12:30:03.606278 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494830-dqg2p" Jan 29 12:30:03 crc kubenswrapper[4753]: I0129 12:30:03.753270 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6gk62\" (UniqueName: \"kubernetes.io/projected/75bb4a41-cec1-4808-9d05-35ea682d4d35-kube-api-access-6gk62\") pod \"75bb4a41-cec1-4808-9d05-35ea682d4d35\" (UID: \"75bb4a41-cec1-4808-9d05-35ea682d4d35\") " Jan 29 12:30:03 crc kubenswrapper[4753]: I0129 12:30:03.753742 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/75bb4a41-cec1-4808-9d05-35ea682d4d35-secret-volume\") pod \"75bb4a41-cec1-4808-9d05-35ea682d4d35\" (UID: \"75bb4a41-cec1-4808-9d05-35ea682d4d35\") " Jan 29 12:30:03 crc kubenswrapper[4753]: I0129 12:30:03.754538 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/75bb4a41-cec1-4808-9d05-35ea682d4d35-config-volume\") pod \"75bb4a41-cec1-4808-9d05-35ea682d4d35\" (UID: \"75bb4a41-cec1-4808-9d05-35ea682d4d35\") " Jan 29 12:30:03 crc kubenswrapper[4753]: I0129 12:30:03.754903 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/75bb4a41-cec1-4808-9d05-35ea682d4d35-config-volume" (OuterVolumeSpecName: "config-volume") pod "75bb4a41-cec1-4808-9d05-35ea682d4d35" (UID: "75bb4a41-cec1-4808-9d05-35ea682d4d35"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:30:03 crc kubenswrapper[4753]: I0129 12:30:03.755252 4753 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/75bb4a41-cec1-4808-9d05-35ea682d4d35-config-volume\") on node \"crc\" DevicePath \"\"" Jan 29 12:30:03 crc kubenswrapper[4753]: I0129 12:30:03.759441 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/75bb4a41-cec1-4808-9d05-35ea682d4d35-kube-api-access-6gk62" (OuterVolumeSpecName: "kube-api-access-6gk62") pod "75bb4a41-cec1-4808-9d05-35ea682d4d35" (UID: "75bb4a41-cec1-4808-9d05-35ea682d4d35"). InnerVolumeSpecName "kube-api-access-6gk62". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:30:03 crc kubenswrapper[4753]: I0129 12:30:03.762097 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/75bb4a41-cec1-4808-9d05-35ea682d4d35-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "75bb4a41-cec1-4808-9d05-35ea682d4d35" (UID: "75bb4a41-cec1-4808-9d05-35ea682d4d35"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:30:03 crc kubenswrapper[4753]: I0129 12:30:03.856862 4753 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/75bb4a41-cec1-4808-9d05-35ea682d4d35-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 29 12:30:03 crc kubenswrapper[4753]: I0129 12:30:03.856988 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6gk62\" (UniqueName: \"kubernetes.io/projected/75bb4a41-cec1-4808-9d05-35ea682d4d35-kube-api-access-6gk62\") on node \"crc\" DevicePath \"\"" Jan 29 12:30:04 crc kubenswrapper[4753]: I0129 12:30:04.193386 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494830-dqg2p" event={"ID":"75bb4a41-cec1-4808-9d05-35ea682d4d35","Type":"ContainerDied","Data":"7941cf209cb6791bb54e52d87feca0aadc3231109707f8ae38c10176a0a2245c"} Jan 29 12:30:04 crc kubenswrapper[4753]: I0129 12:30:04.193429 4753 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7941cf209cb6791bb54e52d87feca0aadc3231109707f8ae38c10176a0a2245c" Jan 29 12:30:04 crc kubenswrapper[4753]: I0129 12:30:04.193488 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494830-dqg2p" Jan 29 12:30:22 crc kubenswrapper[4753]: I0129 12:30:22.619071 4753 generic.go:334] "Generic (PLEG): container finished" podID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerID="7fcbcb12053c0515810e35d7051fa5695d83d9eb527057e515134b63851a5c9a" exitCode=137 Jan 29 12:30:22 crc kubenswrapper[4753]: I0129 12:30:22.619596 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"138a8739-c950-4fe4-9aaa-e232bdea3f7b","Type":"ContainerDied","Data":"7fcbcb12053c0515810e35d7051fa5695d83d9eb527057e515134b63851a5c9a"} Jan 29 12:30:22 crc kubenswrapper[4753]: I0129 12:30:22.868359 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:30:23 crc kubenswrapper[4753]: I0129 12:30:23.076028 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swift\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"138a8739-c950-4fe4-9aaa-e232bdea3f7b\" (UID: \"138a8739-c950-4fe4-9aaa-e232bdea3f7b\") " Jan 29 12:30:23 crc kubenswrapper[4753]: I0129 12:30:23.076100 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/138a8739-c950-4fe4-9aaa-e232bdea3f7b-lock\") pod \"138a8739-c950-4fe4-9aaa-e232bdea3f7b\" (UID: \"138a8739-c950-4fe4-9aaa-e232bdea3f7b\") " Jan 29 12:30:23 crc kubenswrapper[4753]: I0129 12:30:23.076123 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/138a8739-c950-4fe4-9aaa-e232bdea3f7b-cache\") pod \"138a8739-c950-4fe4-9aaa-e232bdea3f7b\" (UID: \"138a8739-c950-4fe4-9aaa-e232bdea3f7b\") " Jan 29 12:30:23 crc kubenswrapper[4753]: I0129 12:30:23.076152 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jlczr\" (UniqueName: \"kubernetes.io/projected/138a8739-c950-4fe4-9aaa-e232bdea3f7b-kube-api-access-jlczr\") pod \"138a8739-c950-4fe4-9aaa-e232bdea3f7b\" (UID: \"138a8739-c950-4fe4-9aaa-e232bdea3f7b\") " Jan 29 12:30:23 crc kubenswrapper[4753]: I0129 12:30:23.076174 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/138a8739-c950-4fe4-9aaa-e232bdea3f7b-etc-swift\") pod \"138a8739-c950-4fe4-9aaa-e232bdea3f7b\" (UID: \"138a8739-c950-4fe4-9aaa-e232bdea3f7b\") " Jan 29 12:30:23 crc kubenswrapper[4753]: I0129 12:30:23.076879 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/138a8739-c950-4fe4-9aaa-e232bdea3f7b-lock" (OuterVolumeSpecName: "lock") pod "138a8739-c950-4fe4-9aaa-e232bdea3f7b" (UID: "138a8739-c950-4fe4-9aaa-e232bdea3f7b"). InnerVolumeSpecName "lock". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:30:23 crc kubenswrapper[4753]: I0129 12:30:23.077475 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/138a8739-c950-4fe4-9aaa-e232bdea3f7b-cache" (OuterVolumeSpecName: "cache") pod "138a8739-c950-4fe4-9aaa-e232bdea3f7b" (UID: "138a8739-c950-4fe4-9aaa-e232bdea3f7b"). InnerVolumeSpecName "cache". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:30:23 crc kubenswrapper[4753]: I0129 12:30:23.086187 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/138a8739-c950-4fe4-9aaa-e232bdea3f7b-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "138a8739-c950-4fe4-9aaa-e232bdea3f7b" (UID: "138a8739-c950-4fe4-9aaa-e232bdea3f7b"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:30:23 crc kubenswrapper[4753]: I0129 12:30:23.093439 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage05-crc" (OuterVolumeSpecName: "swift") pod "138a8739-c950-4fe4-9aaa-e232bdea3f7b" (UID: "138a8739-c950-4fe4-9aaa-e232bdea3f7b"). InnerVolumeSpecName "local-storage05-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 29 12:30:23 crc kubenswrapper[4753]: I0129 12:30:23.093727 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/138a8739-c950-4fe4-9aaa-e232bdea3f7b-kube-api-access-jlczr" (OuterVolumeSpecName: "kube-api-access-jlczr") pod "138a8739-c950-4fe4-9aaa-e232bdea3f7b" (UID: "138a8739-c950-4fe4-9aaa-e232bdea3f7b"). InnerVolumeSpecName "kube-api-access-jlczr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:30:23 crc kubenswrapper[4753]: I0129 12:30:23.177628 4753 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" " Jan 29 12:30:23 crc kubenswrapper[4753]: I0129 12:30:23.177984 4753 reconciler_common.go:293] "Volume detached for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/138a8739-c950-4fe4-9aaa-e232bdea3f7b-lock\") on node \"crc\" DevicePath \"\"" Jan 29 12:30:23 crc kubenswrapper[4753]: I0129 12:30:23.178087 4753 reconciler_common.go:293] "Volume detached for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/138a8739-c950-4fe4-9aaa-e232bdea3f7b-cache\") on node \"crc\" DevicePath \"\"" Jan 29 12:30:23 crc kubenswrapper[4753]: I0129 12:30:23.178179 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jlczr\" (UniqueName: \"kubernetes.io/projected/138a8739-c950-4fe4-9aaa-e232bdea3f7b-kube-api-access-jlczr\") on node \"crc\" DevicePath \"\"" Jan 29 12:30:23 crc kubenswrapper[4753]: I0129 12:30:23.178342 4753 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/138a8739-c950-4fe4-9aaa-e232bdea3f7b-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 29 12:30:23 crc kubenswrapper[4753]: I0129 12:30:23.192897 4753 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage05-crc" (UniqueName: "kubernetes.io/local-volume/local-storage05-crc") on node "crc" Jan 29 12:30:23 crc kubenswrapper[4753]: I0129 12:30:23.280410 4753 reconciler_common.go:293] "Volume detached for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" DevicePath \"\"" Jan 29 12:30:23 crc kubenswrapper[4753]: I0129 12:30:23.643493 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"138a8739-c950-4fe4-9aaa-e232bdea3f7b","Type":"ContainerDied","Data":"04e8b885bf5708fa3401f1a27e8523229ca977ae09514545c5098a56e2dca64b"} Jan 29 12:30:23 crc kubenswrapper[4753]: I0129 12:30:23.643572 4753 scope.go:117] "RemoveContainer" containerID="bb19f71c8e939cc414644c25eaf5efc7de06e81b24e457c1ea9f7a902dcce561" Jan 29 12:30:23 crc kubenswrapper[4753]: I0129 12:30:23.643731 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:30:23 crc kubenswrapper[4753]: I0129 12:30:23.687496 4753 scope.go:117] "RemoveContainer" containerID="7fcbcb12053c0515810e35d7051fa5695d83d9eb527057e515134b63851a5c9a" Jan 29 12:30:23 crc kubenswrapper[4753]: I0129 12:30:23.720270 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/swift-storage-0"] Jan 29 12:30:23 crc kubenswrapper[4753]: I0129 12:30:23.722641 4753 scope.go:117] "RemoveContainer" containerID="40737a21840cfda7ec2c3c77af6e8921570fae2535ec52d19c97b4e32a6925e5" Jan 29 12:30:23 crc kubenswrapper[4753]: I0129 12:30:23.729588 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/swift-storage-0"] Jan 29 12:30:23 crc kubenswrapper[4753]: I0129 12:30:23.750893 4753 scope.go:117] "RemoveContainer" containerID="5b83e1721f6dc650c1c22675c76d28414a7df673068b610229a08310233f3430" Jan 29 12:30:23 crc kubenswrapper[4753]: I0129 12:30:23.767428 4753 scope.go:117] "RemoveContainer" containerID="c3746f53cf45602790eb986ea13b559998358657e337ca9a64e63b7065410941" Jan 29 12:30:23 crc kubenswrapper[4753]: I0129 12:30:23.785654 4753 scope.go:117] "RemoveContainer" containerID="e5782ff28242ca4b5c6270a37148829832413394f867d4c8a8b48f28cd77d95b" Jan 29 12:30:23 crc kubenswrapper[4753]: I0129 12:30:23.802391 4753 scope.go:117] "RemoveContainer" containerID="83c3bedf95a58e8d14219b47ae5fb9e7f9e6c837be7170b58c5b00ab6bb13bd3" Jan 29 12:30:23 crc kubenswrapper[4753]: I0129 12:30:23.816358 4753 scope.go:117] "RemoveContainer" containerID="e9fae1d0304d1a6d261a833051d3006fa143b16cd6d6e8cbeb4a066535097794" Jan 29 12:30:23 crc kubenswrapper[4753]: I0129 12:30:23.831679 4753 scope.go:117] "RemoveContainer" containerID="aae9e4a99411d122b8ed7b019b9e7329b1b82581947c665733e25cb2ccf08256" Jan 29 12:30:23 crc kubenswrapper[4753]: I0129 12:30:23.849064 4753 scope.go:117] "RemoveContainer" containerID="9bda9aa7a80e8ba90786ee356d5874c29c2779adfc8d54c3a2dccdc45759191f" Jan 29 12:30:23 crc kubenswrapper[4753]: I0129 12:30:23.864331 4753 scope.go:117] "RemoveContainer" containerID="b31c0f4794d0d19b53ee6f57a63b973e041e1db6694a61130bdd7106c83c286a" Jan 29 12:30:23 crc kubenswrapper[4753]: I0129 12:30:23.880017 4753 scope.go:117] "RemoveContainer" containerID="87187568015a0e156148a44f379441b4861da67970bece8b4474914aa31f9dba" Jan 29 12:30:23 crc kubenswrapper[4753]: I0129 12:30:23.895548 4753 scope.go:117] "RemoveContainer" containerID="cea3edd31c2b81739cf355a1b06782457367cb6df6f5563b24c73f09b71ddbf9" Jan 29 12:30:23 crc kubenswrapper[4753]: I0129 12:30:23.900364 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" path="/var/lib/kubelet/pods/138a8739-c950-4fe4-9aaa-e232bdea3f7b/volumes" Jan 29 12:30:23 crc kubenswrapper[4753]: I0129 12:30:23.909525 4753 scope.go:117] "RemoveContainer" containerID="da235b77fd3100fc09167c2bfa1fe6dcd9d3796d72e3c97fa902c454142b0594" Jan 29 12:30:23 crc kubenswrapper[4753]: I0129 12:30:23.923422 4753 scope.go:117] "RemoveContainer" containerID="6fb444f8a28cd99977a4bf8425096045f77ba4946835c63cbdeadef200038963" Jan 29 12:30:23 crc kubenswrapper[4753]: I0129 12:30:23.962997 4753 scope.go:117] "RemoveContainer" containerID="d2af7af180031de8fdd0983850a21bbb0dc7bc730ec666598e572f49cb56c2a4" Jan 29 12:30:24 crc kubenswrapper[4753]: I0129 12:30:24.819906 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-7fm9z"] Jan 29 12:30:24 crc kubenswrapper[4753]: E0129 12:30:24.821359 4753 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="object-replicator" Jan 29 12:30:24 crc kubenswrapper[4753]: I0129 12:30:24.821623 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="object-replicator" Jan 29 12:30:24 crc kubenswrapper[4753]: E0129 12:30:24.821699 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="object-auditor" Jan 29 12:30:24 crc kubenswrapper[4753]: I0129 12:30:24.821770 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="object-auditor" Jan 29 12:30:24 crc kubenswrapper[4753]: E0129 12:30:24.827606 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="object-updater" Jan 29 12:30:24 crc kubenswrapper[4753]: I0129 12:30:24.827842 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="object-updater" Jan 29 12:30:24 crc kubenswrapper[4753]: E0129 12:30:24.827904 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="account-replicator" Jan 29 12:30:24 crc kubenswrapper[4753]: I0129 12:30:24.827954 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="account-replicator" Jan 29 12:30:24 crc kubenswrapper[4753]: E0129 12:30:24.828051 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="object-server" Jan 29 12:30:24 crc kubenswrapper[4753]: I0129 12:30:24.828103 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="object-server" Jan 29 12:30:24 crc kubenswrapper[4753]: E0129 12:30:24.828167 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="account-server" Jan 29 12:30:24 crc kubenswrapper[4753]: I0129 12:30:24.828236 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="account-server" Jan 29 12:30:24 crc kubenswrapper[4753]: E0129 12:30:24.828297 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="container-replicator" Jan 29 12:30:24 crc kubenswrapper[4753]: I0129 12:30:24.828380 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="container-replicator" Jan 29 12:30:24 crc kubenswrapper[4753]: E0129 12:30:24.828452 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="container-server" Jan 29 12:30:24 crc kubenswrapper[4753]: I0129 12:30:24.828517 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="container-server" Jan 29 12:30:24 crc kubenswrapper[4753]: E0129 12:30:24.828589 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="rsync" Jan 29 12:30:24 crc kubenswrapper[4753]: I0129 12:30:24.828646 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="rsync" Jan 29 12:30:24 crc kubenswrapper[4753]: E0129 
12:30:24.828709 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="container-auditor" Jan 29 12:30:24 crc kubenswrapper[4753]: I0129 12:30:24.828761 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="container-auditor" Jan 29 12:30:24 crc kubenswrapper[4753]: E0129 12:30:24.828829 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="container-sharder" Jan 29 12:30:24 crc kubenswrapper[4753]: I0129 12:30:24.828882 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="container-sharder" Jan 29 12:30:24 crc kubenswrapper[4753]: E0129 12:30:24.828945 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="object-expirer" Jan 29 12:30:24 crc kubenswrapper[4753]: I0129 12:30:24.829015 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="object-expirer" Jan 29 12:30:24 crc kubenswrapper[4753]: E0129 12:30:24.829086 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="swift-recon-cron" Jan 29 12:30:24 crc kubenswrapper[4753]: I0129 12:30:24.829146 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="swift-recon-cron" Jan 29 12:30:24 crc kubenswrapper[4753]: E0129 12:30:24.829209 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="account-reaper" Jan 29 12:30:24 crc kubenswrapper[4753]: I0129 12:30:24.829288 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="account-reaper" Jan 29 12:30:24 crc kubenswrapper[4753]: E0129 12:30:24.829340 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="container-updater" Jan 29 12:30:24 crc kubenswrapper[4753]: I0129 12:30:24.829476 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="container-updater" Jan 29 12:30:24 crc kubenswrapper[4753]: E0129 12:30:24.829535 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="75bb4a41-cec1-4808-9d05-35ea682d4d35" containerName="collect-profiles" Jan 29 12:30:24 crc kubenswrapper[4753]: I0129 12:30:24.829585 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="75bb4a41-cec1-4808-9d05-35ea682d4d35" containerName="collect-profiles" Jan 29 12:30:24 crc kubenswrapper[4753]: E0129 12:30:24.829637 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="account-auditor" Jan 29 12:30:24 crc kubenswrapper[4753]: I0129 12:30:24.829690 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="account-auditor" Jan 29 12:30:24 crc kubenswrapper[4753]: I0129 12:30:24.829997 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="account-server" Jan 29 12:30:24 crc kubenswrapper[4753]: I0129 12:30:24.830067 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="object-replicator" Jan 29 
12:30:24 crc kubenswrapper[4753]: I0129 12:30:24.830128 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="object-auditor" Jan 29 12:30:24 crc kubenswrapper[4753]: I0129 12:30:24.830186 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="swift-recon-cron" Jan 29 12:30:24 crc kubenswrapper[4753]: I0129 12:30:24.830255 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="object-expirer" Jan 29 12:30:24 crc kubenswrapper[4753]: I0129 12:30:24.830308 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="container-updater" Jan 29 12:30:24 crc kubenswrapper[4753]: I0129 12:30:24.830366 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="container-sharder" Jan 29 12:30:24 crc kubenswrapper[4753]: I0129 12:30:24.830439 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="container-auditor" Jan 29 12:30:24 crc kubenswrapper[4753]: I0129 12:30:24.830499 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="75bb4a41-cec1-4808-9d05-35ea682d4d35" containerName="collect-profiles" Jan 29 12:30:24 crc kubenswrapper[4753]: I0129 12:30:24.830566 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="account-reaper" Jan 29 12:30:24 crc kubenswrapper[4753]: I0129 12:30:24.830617 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="object-server" Jan 29 12:30:24 crc kubenswrapper[4753]: I0129 12:30:24.830669 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="object-updater" Jan 29 12:30:24 crc kubenswrapper[4753]: I0129 12:30:24.830723 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="container-replicator" Jan 29 12:30:24 crc kubenswrapper[4753]: I0129 12:30:24.830783 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="account-auditor" Jan 29 12:30:24 crc kubenswrapper[4753]: I0129 12:30:24.830842 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="container-server" Jan 29 12:30:24 crc kubenswrapper[4753]: I0129 12:30:24.830892 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="rsync" Jan 29 12:30:24 crc kubenswrapper[4753]: I0129 12:30:24.830943 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="138a8739-c950-4fe4-9aaa-e232bdea3f7b" containerName="account-replicator" Jan 29 12:30:24 crc kubenswrapper[4753]: I0129 12:30:24.832020 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-7fm9z" Jan 29 12:30:24 crc kubenswrapper[4753]: I0129 12:30:24.835176 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-7fm9z"] Jan 29 12:30:25 crc kubenswrapper[4753]: I0129 12:30:25.060146 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lw6n8\" (UniqueName: \"kubernetes.io/projected/c9067882-bffb-477d-9b76-b22f026a77d5-kube-api-access-lw6n8\") pod \"redhat-operators-7fm9z\" (UID: \"c9067882-bffb-477d-9b76-b22f026a77d5\") " pod="openshift-marketplace/redhat-operators-7fm9z" Jan 29 12:30:25 crc kubenswrapper[4753]: I0129 12:30:25.060315 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c9067882-bffb-477d-9b76-b22f026a77d5-catalog-content\") pod \"redhat-operators-7fm9z\" (UID: \"c9067882-bffb-477d-9b76-b22f026a77d5\") " pod="openshift-marketplace/redhat-operators-7fm9z" Jan 29 12:30:25 crc kubenswrapper[4753]: I0129 12:30:25.060402 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c9067882-bffb-477d-9b76-b22f026a77d5-utilities\") pod \"redhat-operators-7fm9z\" (UID: \"c9067882-bffb-477d-9b76-b22f026a77d5\") " pod="openshift-marketplace/redhat-operators-7fm9z" Jan 29 12:30:25 crc kubenswrapper[4753]: I0129 12:30:25.161494 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c9067882-bffb-477d-9b76-b22f026a77d5-catalog-content\") pod \"redhat-operators-7fm9z\" (UID: \"c9067882-bffb-477d-9b76-b22f026a77d5\") " pod="openshift-marketplace/redhat-operators-7fm9z" Jan 29 12:30:25 crc kubenswrapper[4753]: I0129 12:30:25.161556 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c9067882-bffb-477d-9b76-b22f026a77d5-utilities\") pod \"redhat-operators-7fm9z\" (UID: \"c9067882-bffb-477d-9b76-b22f026a77d5\") " pod="openshift-marketplace/redhat-operators-7fm9z" Jan 29 12:30:25 crc kubenswrapper[4753]: I0129 12:30:25.161626 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lw6n8\" (UniqueName: \"kubernetes.io/projected/c9067882-bffb-477d-9b76-b22f026a77d5-kube-api-access-lw6n8\") pod \"redhat-operators-7fm9z\" (UID: \"c9067882-bffb-477d-9b76-b22f026a77d5\") " pod="openshift-marketplace/redhat-operators-7fm9z" Jan 29 12:30:25 crc kubenswrapper[4753]: I0129 12:30:25.162079 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c9067882-bffb-477d-9b76-b22f026a77d5-catalog-content\") pod \"redhat-operators-7fm9z\" (UID: \"c9067882-bffb-477d-9b76-b22f026a77d5\") " pod="openshift-marketplace/redhat-operators-7fm9z" Jan 29 12:30:25 crc kubenswrapper[4753]: I0129 12:30:25.162112 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c9067882-bffb-477d-9b76-b22f026a77d5-utilities\") pod \"redhat-operators-7fm9z\" (UID: \"c9067882-bffb-477d-9b76-b22f026a77d5\") " pod="openshift-marketplace/redhat-operators-7fm9z" Jan 29 12:30:25 crc kubenswrapper[4753]: I0129 12:30:25.182923 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-lw6n8\" (UniqueName: \"kubernetes.io/projected/c9067882-bffb-477d-9b76-b22f026a77d5-kube-api-access-lw6n8\") pod \"redhat-operators-7fm9z\" (UID: \"c9067882-bffb-477d-9b76-b22f026a77d5\") " pod="openshift-marketplace/redhat-operators-7fm9z" Jan 29 12:30:25 crc kubenswrapper[4753]: I0129 12:30:25.473713 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7fm9z" Jan 29 12:30:25 crc kubenswrapper[4753]: I0129 12:30:25.757903 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-7fm9z"] Jan 29 12:30:25 crc kubenswrapper[4753]: I0129 12:30:25.962835 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/swift-storage-0"] Jan 29 12:30:25 crc kubenswrapper[4753]: I0129 12:30:25.968725 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:30:25 crc kubenswrapper[4753]: I0129 12:30:25.971862 4753 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"swift-conf" Jan 29 12:30:25 crc kubenswrapper[4753]: I0129 12:30:25.972055 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"swift-kuttl-tests"/"swift-ring-files" Jan 29 12:30:25 crc kubenswrapper[4753]: I0129 12:30:25.972213 4753 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"swift-swift-dockercfg-jkh9s" Jan 29 12:30:25 crc kubenswrapper[4753]: I0129 12:30:25.973048 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"swift-kuttl-tests"/"swift-storage-config-data" Jan 29 12:30:25 crc kubenswrapper[4753]: I0129 12:30:25.987531 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/swift-storage-2"] Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.040453 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/swift-storage-1"] Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.040654 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-storage-2" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.067021 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/swift-storage-1" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.070584 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-storage-2"] Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.084951 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f6wsh\" (UniqueName: \"kubernetes.io/projected/fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1-kube-api-access-f6wsh\") pod \"swift-storage-0\" (UID: \"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.085029 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"swift-storage-0\" (UID: \"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.085069 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1-etc-swift\") pod \"swift-storage-0\" (UID: \"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.085101 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1-lock\") pod \"swift-storage-0\" (UID: \"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.085150 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1-cache\") pod \"swift-storage-0\" (UID: \"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.088171 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-storage-0"] Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.104245 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-storage-1"] Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.186090 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/cdf821d7-7cec-4c33-a59e-1c6493fd7281-cache\") pod \"swift-storage-1\" (UID: \"cdf821d7-7cec-4c33-a59e-1c6493fd7281\") " pod="swift-kuttl-tests/swift-storage-1" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.186162 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1-cache\") pod \"swift-storage-0\" (UID: \"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.186189 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/5f186cee-89fe-4c25-b227-628ffcb2a98f-cache\") pod \"swift-storage-2\" (UID: \"5f186cee-89fe-4c25-b227-628ffcb2a98f\") " pod="swift-kuttl-tests/swift-storage-2" Jan 29 12:30:26 crc 
kubenswrapper[4753]: I0129 12:30:26.186211 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tg5qb\" (UniqueName: \"kubernetes.io/projected/5f186cee-89fe-4c25-b227-628ffcb2a98f-kube-api-access-tg5qb\") pod \"swift-storage-2\" (UID: \"5f186cee-89fe-4c25-b227-628ffcb2a98f\") " pod="swift-kuttl-tests/swift-storage-2" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.186255 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f6wsh\" (UniqueName: \"kubernetes.io/projected/fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1-kube-api-access-f6wsh\") pod \"swift-storage-0\" (UID: \"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.186301 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"swift-storage-0\" (UID: \"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.186332 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"swift-storage-1\" (UID: \"cdf821d7-7cec-4c33-a59e-1c6493fd7281\") " pod="swift-kuttl-tests/swift-storage-1" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.186355 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/cdf821d7-7cec-4c33-a59e-1c6493fd7281-lock\") pod \"swift-storage-1\" (UID: \"cdf821d7-7cec-4c33-a59e-1c6493fd7281\") " pod="swift-kuttl-tests/swift-storage-1" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.186375 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1-etc-swift\") pod \"swift-storage-0\" (UID: \"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.186394 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/5f186cee-89fe-4c25-b227-628ffcb2a98f-lock\") pod \"swift-storage-2\" (UID: \"5f186cee-89fe-4c25-b227-628ffcb2a98f\") " pod="swift-kuttl-tests/swift-storage-2" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.186418 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/cdf821d7-7cec-4c33-a59e-1c6493fd7281-etc-swift\") pod \"swift-storage-1\" (UID: \"cdf821d7-7cec-4c33-a59e-1c6493fd7281\") " pod="swift-kuttl-tests/swift-storage-1" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.186442 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1-lock\") pod \"swift-storage-0\" (UID: \"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.186464 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lrf7t\" (UniqueName: 
\"kubernetes.io/projected/cdf821d7-7cec-4c33-a59e-1c6493fd7281-kube-api-access-lrf7t\") pod \"swift-storage-1\" (UID: \"cdf821d7-7cec-4c33-a59e-1c6493fd7281\") " pod="swift-kuttl-tests/swift-storage-1" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.186483 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"swift-storage-2\" (UID: \"5f186cee-89fe-4c25-b227-628ffcb2a98f\") " pod="swift-kuttl-tests/swift-storage-2" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.186497 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/5f186cee-89fe-4c25-b227-628ffcb2a98f-etc-swift\") pod \"swift-storage-2\" (UID: \"5f186cee-89fe-4c25-b227-628ffcb2a98f\") " pod="swift-kuttl-tests/swift-storage-2" Jan 29 12:30:26 crc kubenswrapper[4753]: E0129 12:30:26.187191 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found Jan 29 12:30:26 crc kubenswrapper[4753]: E0129 12:30:26.187343 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-storage-0: configmap "swift-ring-files" not found Jan 29 12:30:26 crc kubenswrapper[4753]: E0129 12:30:26.187479 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1-etc-swift podName:fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1 nodeName:}" failed. No retries permitted until 2026-01-29 12:30:26.68745302 +0000 UTC m=+1440.939534475 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1-etc-swift") pod "swift-storage-0" (UID: "fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1") : configmap "swift-ring-files" not found Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.187238 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1-cache\") pod \"swift-storage-0\" (UID: \"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.187492 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1-lock\") pod \"swift-storage-0\" (UID: \"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.187783 4753 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"swift-storage-0\" (UID: \"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1\") device mount path \"/mnt/openstack/pv02\"" pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.215101 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f6wsh\" (UniqueName: \"kubernetes.io/projected/fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1-kube-api-access-f6wsh\") pod \"swift-storage-0\" (UID: \"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.225617 4753 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"swift-storage-0\" (UID: \"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:30:26 crc kubenswrapper[4753]: E0129 12:30:26.259497 4753 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc9067882_bffb_477d_9b76_b22f026a77d5.slice/crio-conmon-15177b2346f85d0d00160d94ae5f021509bb804799a6f5c43364f6e5fe9ff0f0.scope\": RecentStats: unable to find data in memory cache]" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.304347 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"swift-storage-1\" (UID: \"cdf821d7-7cec-4c33-a59e-1c6493fd7281\") " pod="swift-kuttl-tests/swift-storage-1" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.304413 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/cdf821d7-7cec-4c33-a59e-1c6493fd7281-lock\") pod \"swift-storage-1\" (UID: \"cdf821d7-7cec-4c33-a59e-1c6493fd7281\") " pod="swift-kuttl-tests/swift-storage-1" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.304482 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/5f186cee-89fe-4c25-b227-628ffcb2a98f-lock\") pod \"swift-storage-2\" (UID: \"5f186cee-89fe-4c25-b227-628ffcb2a98f\") " pod="swift-kuttl-tests/swift-storage-2" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.304530 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/cdf821d7-7cec-4c33-a59e-1c6493fd7281-etc-swift\") pod \"swift-storage-1\" (UID: \"cdf821d7-7cec-4c33-a59e-1c6493fd7281\") " pod="swift-kuttl-tests/swift-storage-1" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.304582 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lrf7t\" (UniqueName: \"kubernetes.io/projected/cdf821d7-7cec-4c33-a59e-1c6493fd7281-kube-api-access-lrf7t\") pod \"swift-storage-1\" (UID: \"cdf821d7-7cec-4c33-a59e-1c6493fd7281\") " pod="swift-kuttl-tests/swift-storage-1" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.304627 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"swift-storage-2\" (UID: \"5f186cee-89fe-4c25-b227-628ffcb2a98f\") " pod="swift-kuttl-tests/swift-storage-2" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.304665 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/5f186cee-89fe-4c25-b227-628ffcb2a98f-etc-swift\") pod \"swift-storage-2\" (UID: \"5f186cee-89fe-4c25-b227-628ffcb2a98f\") " pod="swift-kuttl-tests/swift-storage-2" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.304721 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/cdf821d7-7cec-4c33-a59e-1c6493fd7281-cache\") pod \"swift-storage-1\" (UID: \"cdf821d7-7cec-4c33-a59e-1c6493fd7281\") " pod="swift-kuttl-tests/swift-storage-1" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.304796 
4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/5f186cee-89fe-4c25-b227-628ffcb2a98f-cache\") pod \"swift-storage-2\" (UID: \"5f186cee-89fe-4c25-b227-628ffcb2a98f\") " pod="swift-kuttl-tests/swift-storage-2" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.304828 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tg5qb\" (UniqueName: \"kubernetes.io/projected/5f186cee-89fe-4c25-b227-628ffcb2a98f-kube-api-access-tg5qb\") pod \"swift-storage-2\" (UID: \"5f186cee-89fe-4c25-b227-628ffcb2a98f\") " pod="swift-kuttl-tests/swift-storage-2" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.305616 4753 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"swift-storage-1\" (UID: \"cdf821d7-7cec-4c33-a59e-1c6493fd7281\") device mount path \"/mnt/openstack/pv08\"" pod="swift-kuttl-tests/swift-storage-1" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.307880 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/cdf821d7-7cec-4c33-a59e-1c6493fd7281-lock\") pod \"swift-storage-1\" (UID: \"cdf821d7-7cec-4c33-a59e-1c6493fd7281\") " pod="swift-kuttl-tests/swift-storage-1" Jan 29 12:30:26 crc kubenswrapper[4753]: E0129 12:30:26.308390 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found Jan 29 12:30:26 crc kubenswrapper[4753]: E0129 12:30:26.309257 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-storage-2: configmap "swift-ring-files" not found Jan 29 12:30:26 crc kubenswrapper[4753]: E0129 12:30:26.309438 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/5f186cee-89fe-4c25-b227-628ffcb2a98f-etc-swift podName:5f186cee-89fe-4c25-b227-628ffcb2a98f nodeName:}" failed. No retries permitted until 2026-01-29 12:30:26.809411265 +0000 UTC m=+1441.061492720 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/5f186cee-89fe-4c25-b227-628ffcb2a98f-etc-swift") pod "swift-storage-2" (UID: "5f186cee-89fe-4c25-b227-628ffcb2a98f") : configmap "swift-ring-files" not found Jan 29 12:30:26 crc kubenswrapper[4753]: E0129 12:30:26.308869 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found Jan 29 12:30:26 crc kubenswrapper[4753]: E0129 12:30:26.309863 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-storage-1: configmap "swift-ring-files" not found Jan 29 12:30:26 crc kubenswrapper[4753]: E0129 12:30:26.309986 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/cdf821d7-7cec-4c33-a59e-1c6493fd7281-etc-swift podName:cdf821d7-7cec-4c33-a59e-1c6493fd7281 nodeName:}" failed. No retries permitted until 2026-01-29 12:30:26.809970671 +0000 UTC m=+1441.062052126 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/cdf821d7-7cec-4c33-a59e-1c6493fd7281-etc-swift") pod "swift-storage-1" (UID: "cdf821d7-7cec-4c33-a59e-1c6493fd7281") : configmap "swift-ring-files" not found Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.308772 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/cdf821d7-7cec-4c33-a59e-1c6493fd7281-cache\") pod \"swift-storage-1\" (UID: \"cdf821d7-7cec-4c33-a59e-1c6493fd7281\") " pod="swift-kuttl-tests/swift-storage-1" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.308848 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/5f186cee-89fe-4c25-b227-628ffcb2a98f-cache\") pod \"swift-storage-2\" (UID: \"5f186cee-89fe-4c25-b227-628ffcb2a98f\") " pod="swift-kuttl-tests/swift-storage-2" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.308437 4753 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"swift-storage-2\" (UID: \"5f186cee-89fe-4c25-b227-628ffcb2a98f\") device mount path \"/mnt/openstack/pv09\"" pod="swift-kuttl-tests/swift-storage-2" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.309082 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/5f186cee-89fe-4c25-b227-628ffcb2a98f-lock\") pod \"swift-storage-2\" (UID: \"5f186cee-89fe-4c25-b227-628ffcb2a98f\") " pod="swift-kuttl-tests/swift-storage-2" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.426709 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"swift-storage-1\" (UID: \"cdf821d7-7cec-4c33-a59e-1c6493fd7281\") " pod="swift-kuttl-tests/swift-storage-1" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.457762 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tg5qb\" (UniqueName: \"kubernetes.io/projected/5f186cee-89fe-4c25-b227-628ffcb2a98f-kube-api-access-tg5qb\") pod \"swift-storage-2\" (UID: \"5f186cee-89fe-4c25-b227-628ffcb2a98f\") " pod="swift-kuttl-tests/swift-storage-2" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.462575 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-pckfk"] Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.465533 4753 util.go:30] "No sandbox for pod can be found. 
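
The recurring 'configmap "swift-ring-files" not found' errors above share one root cause: the projected etc-swift volume of the swift-storage pods sources a ConfigMap that has not been created yet, so MountVolume.SetUp cannot complete and the pods stay in ContainerCreating. A quick way to confirm the missing dependency from outside is the same lookup the projected-volume plugin performs; the sketch below uses client-go and assumes a default kubeconfig at ~/.kube/config pointing at the cluster producing this log.

package main

import (
	"context"
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumes ~/.kube/config points at the cluster that produced this log.
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// The same ConfigMap lookup that fails in projected.go above.
	_, err = client.CoreV1().ConfigMaps("swift-kuttl-tests").
		Get(context.TODO(), "swift-ring-files", metav1.GetOptions{})
	switch {
	case apierrors.IsNotFound(err):
		fmt.Println("swift-ring-files not published yet; etc-swift mounts will keep retrying")
	case err != nil:
		panic(err)
	default:
		fmt.Println("swift-ring-files exists; etc-swift can now be set up")
	}
}
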
Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-pckfk" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.468092 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lrf7t\" (UniqueName: \"kubernetes.io/projected/cdf821d7-7cec-4c33-a59e-1c6493fd7281-kube-api-access-lrf7t\") pod \"swift-storage-1\" (UID: \"cdf821d7-7cec-4c33-a59e-1c6493fd7281\") " pod="swift-kuttl-tests/swift-storage-1" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.469720 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"swift-storage-2\" (UID: \"5f186cee-89fe-4c25-b227-628ffcb2a98f\") " pod="swift-kuttl-tests/swift-storage-2" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.471818 4753 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"swift-proxy-config-data" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.471823 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"swift-kuttl-tests"/"swift-ring-scripts" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.472629 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"swift-kuttl-tests"/"swift-ring-config-data" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.478876 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-pckfk"] Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.629900 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/f284a293-aa84-46cd-bb59-b2d8e3d1762c-dispersionconf\") pod \"swift-ring-rebalance-pckfk\" (UID: \"f284a293-aa84-46cd-bb59-b2d8e3d1762c\") " pod="swift-kuttl-tests/swift-ring-rebalance-pckfk" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.630338 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/f284a293-aa84-46cd-bb59-b2d8e3d1762c-swiftconf\") pod \"swift-ring-rebalance-pckfk\" (UID: \"f284a293-aa84-46cd-bb59-b2d8e3d1762c\") " pod="swift-kuttl-tests/swift-ring-rebalance-pckfk" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.630567 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f284a293-aa84-46cd-bb59-b2d8e3d1762c-scripts\") pod \"swift-ring-rebalance-pckfk\" (UID: \"f284a293-aa84-46cd-bb59-b2d8e3d1762c\") " pod="swift-kuttl-tests/swift-ring-rebalance-pckfk" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.630625 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5rx2p\" (UniqueName: \"kubernetes.io/projected/f284a293-aa84-46cd-bb59-b2d8e3d1762c-kube-api-access-5rx2p\") pod \"swift-ring-rebalance-pckfk\" (UID: \"f284a293-aa84-46cd-bb59-b2d8e3d1762c\") " pod="swift-kuttl-tests/swift-ring-rebalance-pckfk" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.630672 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/f284a293-aa84-46cd-bb59-b2d8e3d1762c-etc-swift\") pod \"swift-ring-rebalance-pckfk\" (UID: \"f284a293-aa84-46cd-bb59-b2d8e3d1762c\") " pod="swift-kuttl-tests/swift-ring-rebalance-pckfk" Jan 29 12:30:26 crc 
kubenswrapper[4753]: I0129 12:30:26.630761 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/f284a293-aa84-46cd-bb59-b2d8e3d1762c-ring-data-devices\") pod \"swift-ring-rebalance-pckfk\" (UID: \"f284a293-aa84-46cd-bb59-b2d8e3d1762c\") " pod="swift-kuttl-tests/swift-ring-rebalance-pckfk" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.733012 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f284a293-aa84-46cd-bb59-b2d8e3d1762c-scripts\") pod \"swift-ring-rebalance-pckfk\" (UID: \"f284a293-aa84-46cd-bb59-b2d8e3d1762c\") " pod="swift-kuttl-tests/swift-ring-rebalance-pckfk" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.733054 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5rx2p\" (UniqueName: \"kubernetes.io/projected/f284a293-aa84-46cd-bb59-b2d8e3d1762c-kube-api-access-5rx2p\") pod \"swift-ring-rebalance-pckfk\" (UID: \"f284a293-aa84-46cd-bb59-b2d8e3d1762c\") " pod="swift-kuttl-tests/swift-ring-rebalance-pckfk" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.733075 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/f284a293-aa84-46cd-bb59-b2d8e3d1762c-etc-swift\") pod \"swift-ring-rebalance-pckfk\" (UID: \"f284a293-aa84-46cd-bb59-b2d8e3d1762c\") " pod="swift-kuttl-tests/swift-ring-rebalance-pckfk" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.733099 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/f284a293-aa84-46cd-bb59-b2d8e3d1762c-ring-data-devices\") pod \"swift-ring-rebalance-pckfk\" (UID: \"f284a293-aa84-46cd-bb59-b2d8e3d1762c\") " pod="swift-kuttl-tests/swift-ring-rebalance-pckfk" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.733160 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/f284a293-aa84-46cd-bb59-b2d8e3d1762c-dispersionconf\") pod \"swift-ring-rebalance-pckfk\" (UID: \"f284a293-aa84-46cd-bb59-b2d8e3d1762c\") " pod="swift-kuttl-tests/swift-ring-rebalance-pckfk" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.733184 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1-etc-swift\") pod \"swift-storage-0\" (UID: \"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.733200 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/f284a293-aa84-46cd-bb59-b2d8e3d1762c-swiftconf\") pod \"swift-ring-rebalance-pckfk\" (UID: \"f284a293-aa84-46cd-bb59-b2d8e3d1762c\") " pod="swift-kuttl-tests/swift-ring-rebalance-pckfk" Jan 29 12:30:26 crc kubenswrapper[4753]: E0129 12:30:26.734217 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found Jan 29 12:30:26 crc kubenswrapper[4753]: E0129 12:30:26.734338 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-storage-0: configmap "swift-ring-files" not found Jan 29 12:30:26 crc kubenswrapper[4753]: E0129 
12:30:26.734447 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1-etc-swift podName:fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1 nodeName:}" failed. No retries permitted until 2026-01-29 12:30:27.734396738 +0000 UTC m=+1441.986478253 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1-etc-swift") pod "swift-storage-0" (UID: "fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1") : configmap "swift-ring-files" not found Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.734840 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/f284a293-aa84-46cd-bb59-b2d8e3d1762c-etc-swift\") pod \"swift-ring-rebalance-pckfk\" (UID: \"f284a293-aa84-46cd-bb59-b2d8e3d1762c\") " pod="swift-kuttl-tests/swift-ring-rebalance-pckfk" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.735147 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/f284a293-aa84-46cd-bb59-b2d8e3d1762c-ring-data-devices\") pod \"swift-ring-rebalance-pckfk\" (UID: \"f284a293-aa84-46cd-bb59-b2d8e3d1762c\") " pod="swift-kuttl-tests/swift-ring-rebalance-pckfk" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.735594 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f284a293-aa84-46cd-bb59-b2d8e3d1762c-scripts\") pod \"swift-ring-rebalance-pckfk\" (UID: \"f284a293-aa84-46cd-bb59-b2d8e3d1762c\") " pod="swift-kuttl-tests/swift-ring-rebalance-pckfk" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.737767 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/f284a293-aa84-46cd-bb59-b2d8e3d1762c-swiftconf\") pod \"swift-ring-rebalance-pckfk\" (UID: \"f284a293-aa84-46cd-bb59-b2d8e3d1762c\") " pod="swift-kuttl-tests/swift-ring-rebalance-pckfk" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.737956 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/f284a293-aa84-46cd-bb59-b2d8e3d1762c-dispersionconf\") pod \"swift-ring-rebalance-pckfk\" (UID: \"f284a293-aa84-46cd-bb59-b2d8e3d1762c\") " pod="swift-kuttl-tests/swift-ring-rebalance-pckfk" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.738210 4753 generic.go:334] "Generic (PLEG): container finished" podID="c9067882-bffb-477d-9b76-b22f026a77d5" containerID="15177b2346f85d0d00160d94ae5f021509bb804799a6f5c43364f6e5fe9ff0f0" exitCode=0 Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.738288 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7fm9z" event={"ID":"c9067882-bffb-477d-9b76-b22f026a77d5","Type":"ContainerDied","Data":"15177b2346f85d0d00160d94ae5f021509bb804799a6f5c43364f6e5fe9ff0f0"} Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.738325 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7fm9z" event={"ID":"c9067882-bffb-477d-9b76-b22f026a77d5","Type":"ContainerStarted","Data":"c58f606d1815686cc3c301c4fcc938b5b41f6d2102ed43946efdc23bc02cddc6"} Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.742733 4753 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 29 12:30:26 crc 
kubenswrapper[4753]: I0129 12:30:26.845266 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/cdf821d7-7cec-4c33-a59e-1c6493fd7281-etc-swift\") pod \"swift-storage-1\" (UID: \"cdf821d7-7cec-4c33-a59e-1c6493fd7281\") " pod="swift-kuttl-tests/swift-storage-1" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.845341 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/5f186cee-89fe-4c25-b227-628ffcb2a98f-etc-swift\") pod \"swift-storage-2\" (UID: \"5f186cee-89fe-4c25-b227-628ffcb2a98f\") " pod="swift-kuttl-tests/swift-storage-2" Jan 29 12:30:26 crc kubenswrapper[4753]: E0129 12:30:26.845594 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found Jan 29 12:30:26 crc kubenswrapper[4753]: E0129 12:30:26.845609 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-storage-2: configmap "swift-ring-files" not found Jan 29 12:30:26 crc kubenswrapper[4753]: E0129 12:30:26.845656 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/5f186cee-89fe-4c25-b227-628ffcb2a98f-etc-swift podName:5f186cee-89fe-4c25-b227-628ffcb2a98f nodeName:}" failed. No retries permitted until 2026-01-29 12:30:27.845639488 +0000 UTC m=+1442.097720943 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/5f186cee-89fe-4c25-b227-628ffcb2a98f-etc-swift") pod "swift-storage-2" (UID: "5f186cee-89fe-4c25-b227-628ffcb2a98f") : configmap "swift-ring-files" not found Jan 29 12:30:26 crc kubenswrapper[4753]: E0129 12:30:26.845714 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found Jan 29 12:30:26 crc kubenswrapper[4753]: E0129 12:30:26.845722 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-storage-1: configmap "swift-ring-files" not found Jan 29 12:30:26 crc kubenswrapper[4753]: E0129 12:30:26.845739 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/cdf821d7-7cec-4c33-a59e-1c6493fd7281-etc-swift podName:cdf821d7-7cec-4c33-a59e-1c6493fd7281 nodeName:}" failed. No retries permitted until 2026-01-29 12:30:27.845732641 +0000 UTC m=+1442.097814096 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/cdf821d7-7cec-4c33-a59e-1c6493fd7281-etc-swift") pod "swift-storage-1" (UID: "cdf821d7-7cec-4c33-a59e-1c6493fd7281") : configmap "swift-ring-files" not found Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.847978 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/swift-proxy-6bc96d68cf-kbrl4"] Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.849598 4753 util.go:30] "No sandbox for pod can be found. 
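
All three swift-storage replicas and (below) the swift-proxy pod are gated on that one ConfigMap, which the swift-ring-rebalance job started above is expected to generate and publish; until it does, every SetUp attempt fails and is requeued. A controller that owns such a dependency would typically wait for it explicitly. A sketch with apimachinery's polling helper follows, with the interval, timeout, and kubeconfig handling as assumptions.

package main

import (
	"context"
	"fmt"
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// waitForRingFiles blocks until the swift-ring-files ConfigMap exists -- the
// condition every stuck etc-swift mount in this log is waiting on.
func waitForRingFiles(ctx context.Context, client kubernetes.Interface) error {
	return wait.PollUntilContextTimeout(ctx, 2*time.Second, 5*time.Minute, true,
		func(ctx context.Context) (bool, error) {
			_, err := client.CoreV1().ConfigMaps("swift-kuttl-tests").
				Get(ctx, "swift-ring-files", metav1.GetOptions{})
			if apierrors.IsNotFound(err) {
				return false, nil // not there yet; keep polling
			}
			return err == nil, err
		})
}

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	if err := waitForRingFiles(context.Background(), kubernetes.NewForConfigOrDie(cfg)); err != nil {
		panic(err)
	}
	fmt.Println("swift-ring-files is available")
}
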
Need to start a new one" pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-kbrl4" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.864567 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5rx2p\" (UniqueName: \"kubernetes.io/projected/f284a293-aa84-46cd-bb59-b2d8e3d1762c-kube-api-access-5rx2p\") pod \"swift-ring-rebalance-pckfk\" (UID: \"f284a293-aa84-46cd-bb59-b2d8e3d1762c\") " pod="swift-kuttl-tests/swift-ring-rebalance-pckfk" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.870295 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-proxy-6bc96d68cf-kbrl4"] Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.947460 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cec6584a-fcc6-4b1d-8516-c2e6b2194048-log-httpd\") pod \"swift-proxy-6bc96d68cf-kbrl4\" (UID: \"cec6584a-fcc6-4b1d-8516-c2e6b2194048\") " pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-kbrl4" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.947575 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cec6584a-fcc6-4b1d-8516-c2e6b2194048-config-data\") pod \"swift-proxy-6bc96d68cf-kbrl4\" (UID: \"cec6584a-fcc6-4b1d-8516-c2e6b2194048\") " pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-kbrl4" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.947660 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/cec6584a-fcc6-4b1d-8516-c2e6b2194048-etc-swift\") pod \"swift-proxy-6bc96d68cf-kbrl4\" (UID: \"cec6584a-fcc6-4b1d-8516-c2e6b2194048\") " pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-kbrl4" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.947735 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cec6584a-fcc6-4b1d-8516-c2e6b2194048-run-httpd\") pod \"swift-proxy-6bc96d68cf-kbrl4\" (UID: \"cec6584a-fcc6-4b1d-8516-c2e6b2194048\") " pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-kbrl4" Jan 29 12:30:26 crc kubenswrapper[4753]: I0129 12:30:26.947818 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pxf6k\" (UniqueName: \"kubernetes.io/projected/cec6584a-fcc6-4b1d-8516-c2e6b2194048-kube-api-access-pxf6k\") pod \"swift-proxy-6bc96d68cf-kbrl4\" (UID: \"cec6584a-fcc6-4b1d-8516-c2e6b2194048\") " pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-kbrl4" Jan 29 12:30:27 crc kubenswrapper[4753]: I0129 12:30:27.049324 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cec6584a-fcc6-4b1d-8516-c2e6b2194048-run-httpd\") pod \"swift-proxy-6bc96d68cf-kbrl4\" (UID: \"cec6584a-fcc6-4b1d-8516-c2e6b2194048\") " pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-kbrl4" Jan 29 12:30:27 crc kubenswrapper[4753]: I0129 12:30:27.049427 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pxf6k\" (UniqueName: \"kubernetes.io/projected/cec6584a-fcc6-4b1d-8516-c2e6b2194048-kube-api-access-pxf6k\") pod \"swift-proxy-6bc96d68cf-kbrl4\" (UID: \"cec6584a-fcc6-4b1d-8516-c2e6b2194048\") " pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-kbrl4" Jan 29 12:30:27 crc kubenswrapper[4753]: I0129 
12:30:27.049561 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cec6584a-fcc6-4b1d-8516-c2e6b2194048-log-httpd\") pod \"swift-proxy-6bc96d68cf-kbrl4\" (UID: \"cec6584a-fcc6-4b1d-8516-c2e6b2194048\") " pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-kbrl4" Jan 29 12:30:27 crc kubenswrapper[4753]: I0129 12:30:27.049600 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cec6584a-fcc6-4b1d-8516-c2e6b2194048-config-data\") pod \"swift-proxy-6bc96d68cf-kbrl4\" (UID: \"cec6584a-fcc6-4b1d-8516-c2e6b2194048\") " pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-kbrl4" Jan 29 12:30:27 crc kubenswrapper[4753]: I0129 12:30:27.049671 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/cec6584a-fcc6-4b1d-8516-c2e6b2194048-etc-swift\") pod \"swift-proxy-6bc96d68cf-kbrl4\" (UID: \"cec6584a-fcc6-4b1d-8516-c2e6b2194048\") " pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-kbrl4" Jan 29 12:30:27 crc kubenswrapper[4753]: I0129 12:30:27.051645 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cec6584a-fcc6-4b1d-8516-c2e6b2194048-run-httpd\") pod \"swift-proxy-6bc96d68cf-kbrl4\" (UID: \"cec6584a-fcc6-4b1d-8516-c2e6b2194048\") " pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-kbrl4" Jan 29 12:30:27 crc kubenswrapper[4753]: I0129 12:30:27.051970 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cec6584a-fcc6-4b1d-8516-c2e6b2194048-log-httpd\") pod \"swift-proxy-6bc96d68cf-kbrl4\" (UID: \"cec6584a-fcc6-4b1d-8516-c2e6b2194048\") " pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-kbrl4" Jan 29 12:30:27 crc kubenswrapper[4753]: E0129 12:30:27.052631 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found Jan 29 12:30:27 crc kubenswrapper[4753]: E0129 12:30:27.052667 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-proxy-6bc96d68cf-kbrl4: configmap "swift-ring-files" not found Jan 29 12:30:27 crc kubenswrapper[4753]: E0129 12:30:27.052732 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/cec6584a-fcc6-4b1d-8516-c2e6b2194048-etc-swift podName:cec6584a-fcc6-4b1d-8516-c2e6b2194048 nodeName:}" failed. No retries permitted until 2026-01-29 12:30:27.552709411 +0000 UTC m=+1441.804790936 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/cec6584a-fcc6-4b1d-8516-c2e6b2194048-etc-swift") pod "swift-proxy-6bc96d68cf-kbrl4" (UID: "cec6584a-fcc6-4b1d-8516-c2e6b2194048") : configmap "swift-ring-files" not found Jan 29 12:30:27 crc kubenswrapper[4753]: I0129 12:30:27.059592 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cec6584a-fcc6-4b1d-8516-c2e6b2194048-config-data\") pod \"swift-proxy-6bc96d68cf-kbrl4\" (UID: \"cec6584a-fcc6-4b1d-8516-c2e6b2194048\") " pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-kbrl4" Jan 29 12:30:27 crc kubenswrapper[4753]: I0129 12:30:27.073190 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pxf6k\" (UniqueName: \"kubernetes.io/projected/cec6584a-fcc6-4b1d-8516-c2e6b2194048-kube-api-access-pxf6k\") pod \"swift-proxy-6bc96d68cf-kbrl4\" (UID: \"cec6584a-fcc6-4b1d-8516-c2e6b2194048\") " pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-kbrl4" Jan 29 12:30:27 crc kubenswrapper[4753]: I0129 12:30:27.139866 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-pckfk" Jan 29 12:30:27 crc kubenswrapper[4753]: I0129 12:30:27.575431 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/cec6584a-fcc6-4b1d-8516-c2e6b2194048-etc-swift\") pod \"swift-proxy-6bc96d68cf-kbrl4\" (UID: \"cec6584a-fcc6-4b1d-8516-c2e6b2194048\") " pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-kbrl4" Jan 29 12:30:27 crc kubenswrapper[4753]: E0129 12:30:27.576066 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found Jan 29 12:30:27 crc kubenswrapper[4753]: E0129 12:30:27.576093 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-proxy-6bc96d68cf-kbrl4: configmap "swift-ring-files" not found Jan 29 12:30:27 crc kubenswrapper[4753]: E0129 12:30:27.576160 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/cec6584a-fcc6-4b1d-8516-c2e6b2194048-etc-swift podName:cec6584a-fcc6-4b1d-8516-c2e6b2194048 nodeName:}" failed. No retries permitted until 2026-01-29 12:30:28.57614152 +0000 UTC m=+1442.828222975 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/cec6584a-fcc6-4b1d-8516-c2e6b2194048-etc-swift") pod "swift-proxy-6bc96d68cf-kbrl4" (UID: "cec6584a-fcc6-4b1d-8516-c2e6b2194048") : configmap "swift-ring-files" not found Jan 29 12:30:27 crc kubenswrapper[4753]: I0129 12:30:27.780186 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1-etc-swift\") pod \"swift-storage-0\" (UID: \"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:30:27 crc kubenswrapper[4753]: E0129 12:30:27.780403 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found Jan 29 12:30:27 crc kubenswrapper[4753]: E0129 12:30:27.780446 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-storage-0: configmap "swift-ring-files" not found Jan 29 12:30:27 crc kubenswrapper[4753]: E0129 12:30:27.780532 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1-etc-swift podName:fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1 nodeName:}" failed. No retries permitted until 2026-01-29 12:30:29.780508356 +0000 UTC m=+1444.032589851 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1-etc-swift") pod "swift-storage-0" (UID: "fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1") : configmap "swift-ring-files" not found Jan 29 12:30:27 crc kubenswrapper[4753]: I0129 12:30:27.807731 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-pckfk"] Jan 29 12:30:27 crc kubenswrapper[4753]: I0129 12:30:27.885707 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/cdf821d7-7cec-4c33-a59e-1c6493fd7281-etc-swift\") pod \"swift-storage-1\" (UID: \"cdf821d7-7cec-4c33-a59e-1c6493fd7281\") " pod="swift-kuttl-tests/swift-storage-1" Jan 29 12:30:27 crc kubenswrapper[4753]: I0129 12:30:27.886129 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/5f186cee-89fe-4c25-b227-628ffcb2a98f-etc-swift\") pod \"swift-storage-2\" (UID: \"5f186cee-89fe-4c25-b227-628ffcb2a98f\") " pod="swift-kuttl-tests/swift-storage-2" Jan 29 12:30:27 crc kubenswrapper[4753]: E0129 12:30:27.886423 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found Jan 29 12:30:27 crc kubenswrapper[4753]: E0129 12:30:27.886456 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-storage-1: configmap "swift-ring-files" not found Jan 29 12:30:27 crc kubenswrapper[4753]: E0129 12:30:27.886514 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/cdf821d7-7cec-4c33-a59e-1c6493fd7281-etc-swift podName:cdf821d7-7cec-4c33-a59e-1c6493fd7281 nodeName:}" failed. No retries permitted until 2026-01-29 12:30:29.886493767 +0000 UTC m=+1444.138575222 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/cdf821d7-7cec-4c33-a59e-1c6493fd7281-etc-swift") pod "swift-storage-1" (UID: "cdf821d7-7cec-4c33-a59e-1c6493fd7281") : configmap "swift-ring-files" not found Jan 29 12:30:27 crc kubenswrapper[4753]: E0129 12:30:27.888316 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found Jan 29 12:30:27 crc kubenswrapper[4753]: E0129 12:30:27.888351 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-storage-2: configmap "swift-ring-files" not found Jan 29 12:30:27 crc kubenswrapper[4753]: E0129 12:30:27.888418 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/5f186cee-89fe-4c25-b227-628ffcb2a98f-etc-swift podName:5f186cee-89fe-4c25-b227-628ffcb2a98f nodeName:}" failed. No retries permitted until 2026-01-29 12:30:29.888394981 +0000 UTC m=+1444.140476426 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/5f186cee-89fe-4c25-b227-628ffcb2a98f-etc-swift") pod "swift-storage-2" (UID: "5f186cee-89fe-4c25-b227-628ffcb2a98f") : configmap "swift-ring-files" not found Jan 29 12:30:28 crc kubenswrapper[4753]: I0129 12:30:28.648878 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/cec6584a-fcc6-4b1d-8516-c2e6b2194048-etc-swift\") pod \"swift-proxy-6bc96d68cf-kbrl4\" (UID: \"cec6584a-fcc6-4b1d-8516-c2e6b2194048\") " pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-kbrl4" Jan 29 12:30:28 crc kubenswrapper[4753]: E0129 12:30:28.649359 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found Jan 29 12:30:28 crc kubenswrapper[4753]: E0129 12:30:28.649379 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-proxy-6bc96d68cf-kbrl4: configmap "swift-ring-files" not found Jan 29 12:30:28 crc kubenswrapper[4753]: E0129 12:30:28.649436 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/cec6584a-fcc6-4b1d-8516-c2e6b2194048-etc-swift podName:cec6584a-fcc6-4b1d-8516-c2e6b2194048 nodeName:}" failed. No retries permitted until 2026-01-29 12:30:30.649417789 +0000 UTC m=+1444.901499244 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/cec6584a-fcc6-4b1d-8516-c2e6b2194048-etc-swift") pod "swift-proxy-6bc96d68cf-kbrl4" (UID: "cec6584a-fcc6-4b1d-8516-c2e6b2194048") : configmap "swift-ring-files" not found Jan 29 12:30:28 crc kubenswrapper[4753]: I0129 12:30:28.941094 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-ring-rebalance-pckfk" event={"ID":"f284a293-aa84-46cd-bb59-b2d8e3d1762c","Type":"ContainerStarted","Data":"e2f9eb2e7c75f4680190aecbec04893b350096f32936813d869cd07a1d19acb3"} Jan 29 12:30:28 crc kubenswrapper[4753]: I0129 12:30:28.941162 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-ring-rebalance-pckfk" event={"ID":"f284a293-aa84-46cd-bb59-b2d8e3d1762c","Type":"ContainerStarted","Data":"0ca1933805d489a197171e218f7ad10ca96156ed894359a33efcf181de1e40b7"} Jan 29 12:30:28 crc kubenswrapper[4753]: I0129 12:30:28.954732 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7fm9z" event={"ID":"c9067882-bffb-477d-9b76-b22f026a77d5","Type":"ContainerStarted","Data":"078739e1e73bbea27a8ad59f6cc1d174a7d4cc79026e595e70d7ffd544ace1a0"} Jan 29 12:30:28 crc kubenswrapper[4753]: I0129 12:30:28.997895 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="swift-kuttl-tests/swift-ring-rebalance-pckfk" podStartSLOduration=2.9978682279999997 podStartE2EDuration="2.997868228s" podCreationTimestamp="2026-01-29 12:30:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:30:28.997609131 +0000 UTC m=+1443.249690606" watchObservedRunningTime="2026-01-29 12:30:28.997868228 +0000 UTC m=+1443.249949683" Jan 29 12:30:29 crc kubenswrapper[4753]: I0129 12:30:29.840817 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1-etc-swift\") pod \"swift-storage-0\" (UID: \"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:30:29 crc kubenswrapper[4753]: E0129 12:30:29.841060 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found Jan 29 12:30:29 crc kubenswrapper[4753]: E0129 12:30:29.841093 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-storage-0: configmap "swift-ring-files" not found Jan 29 12:30:29 crc kubenswrapper[4753]: E0129 12:30:29.841173 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1-etc-swift podName:fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1 nodeName:}" failed. No retries permitted until 2026-01-29 12:30:33.841150124 +0000 UTC m=+1448.093231629 (durationBeforeRetry 4s). 
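
The "SyncLoop (PLEG)" lines come from the Pod Lifecycle Event Generator: kubelet periodically relists containers in the runtime, diffs the result against the previous snapshot, and feeds ContainerStarted/ContainerDied events into the sync loop -- which is how the exitCode=0 of the finished catalog-extraction containers above was observed. A toy version of that relist diff, purely illustrative (the real PLEG works against the CRI runtime):

package main

import "fmt"

// Toy relist diff in the spirit of kubelet's PLEG: compare two snapshots of
// container states and emit lifecycle events. Illustrative only.
type state string

const (
	running state = "running"
	exited  state = "exited"
)

func relistDiff(prev, cur map[string]state) []string {
	var events []string
	for id, st := range cur {
		switch {
		case prev[id] == "" && st == running:
			events = append(events, "ContainerStarted "+id)
		case prev[id] == running && st == exited:
			events = append(events, "ContainerDied "+id)
		}
	}
	return events
}

func main() {
	prev := map[string]state{"15177b23": running}
	cur := map[string]state{"15177b23": exited, "078739e1": running}
	for _, e := range relistDiff(prev, cur) {
		fmt.Println(e) // one ContainerDied and one ContainerStarted; order may vary
	}
}
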
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1-etc-swift") pod "swift-storage-0" (UID: "fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1") : configmap "swift-ring-files" not found Jan 29 12:30:29 crc kubenswrapper[4753]: I0129 12:30:29.942439 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/cdf821d7-7cec-4c33-a59e-1c6493fd7281-etc-swift\") pod \"swift-storage-1\" (UID: \"cdf821d7-7cec-4c33-a59e-1c6493fd7281\") " pod="swift-kuttl-tests/swift-storage-1" Jan 29 12:30:29 crc kubenswrapper[4753]: I0129 12:30:29.942534 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/5f186cee-89fe-4c25-b227-628ffcb2a98f-etc-swift\") pod \"swift-storage-2\" (UID: \"5f186cee-89fe-4c25-b227-628ffcb2a98f\") " pod="swift-kuttl-tests/swift-storage-2" Jan 29 12:30:29 crc kubenswrapper[4753]: E0129 12:30:29.942699 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found Jan 29 12:30:29 crc kubenswrapper[4753]: E0129 12:30:29.942731 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-storage-1: configmap "swift-ring-files" not found Jan 29 12:30:29 crc kubenswrapper[4753]: E0129 12:30:29.942803 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/cdf821d7-7cec-4c33-a59e-1c6493fd7281-etc-swift podName:cdf821d7-7cec-4c33-a59e-1c6493fd7281 nodeName:}" failed. No retries permitted until 2026-01-29 12:30:33.942782331 +0000 UTC m=+1448.194863786 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/cdf821d7-7cec-4c33-a59e-1c6493fd7281-etc-swift") pod "swift-storage-1" (UID: "cdf821d7-7cec-4c33-a59e-1c6493fd7281") : configmap "swift-ring-files" not found Jan 29 12:30:29 crc kubenswrapper[4753]: E0129 12:30:29.942802 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found Jan 29 12:30:29 crc kubenswrapper[4753]: E0129 12:30:29.942842 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-storage-2: configmap "swift-ring-files" not found Jan 29 12:30:29 crc kubenswrapper[4753]: E0129 12:30:29.942909 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/5f186cee-89fe-4c25-b227-628ffcb2a98f-etc-swift podName:5f186cee-89fe-4c25-b227-628ffcb2a98f nodeName:}" failed. No retries permitted until 2026-01-29 12:30:33.942884544 +0000 UTC m=+1448.194965999 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/5f186cee-89fe-4c25-b227-628ffcb2a98f-etc-swift") pod "swift-storage-2" (UID: "5f186cee-89fe-4c25-b227-628ffcb2a98f") : configmap "swift-ring-files" not found Jan 29 12:30:29 crc kubenswrapper[4753]: I0129 12:30:29.965549 4753 generic.go:334] "Generic (PLEG): container finished" podID="c9067882-bffb-477d-9b76-b22f026a77d5" containerID="078739e1e73bbea27a8ad59f6cc1d174a7d4cc79026e595e70d7ffd544ace1a0" exitCode=0 Jan 29 12:30:29 crc kubenswrapper[4753]: I0129 12:30:29.965669 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7fm9z" event={"ID":"c9067882-bffb-477d-9b76-b22f026a77d5","Type":"ContainerDied","Data":"078739e1e73bbea27a8ad59f6cc1d174a7d4cc79026e595e70d7ffd544ace1a0"} Jan 29 12:30:30 crc kubenswrapper[4753]: I0129 12:30:30.713648 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/cec6584a-fcc6-4b1d-8516-c2e6b2194048-etc-swift\") pod \"swift-proxy-6bc96d68cf-kbrl4\" (UID: \"cec6584a-fcc6-4b1d-8516-c2e6b2194048\") " pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-kbrl4" Jan 29 12:30:30 crc kubenswrapper[4753]: E0129 12:30:30.714052 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found Jan 29 12:30:30 crc kubenswrapper[4753]: E0129 12:30:30.714080 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-proxy-6bc96d68cf-kbrl4: configmap "swift-ring-files" not found Jan 29 12:30:30 crc kubenswrapper[4753]: E0129 12:30:30.714141 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/cec6584a-fcc6-4b1d-8516-c2e6b2194048-etc-swift podName:cec6584a-fcc6-4b1d-8516-c2e6b2194048 nodeName:}" failed. No retries permitted until 2026-01-29 12:30:34.714120484 +0000 UTC m=+1448.966201939 (durationBeforeRetry 4s). 
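
Note the durationBeforeRetry values for the failing etc-swift mounts: 500ms, then 1s, 2s, 4s, and 8s below. nestedpendingoperations retries each failed volume operation with exponential backoff, doubling the delay after every failure. The schedule is easy to reproduce; the cap used in the sketch is an assumption for illustration, since this excerpt never reaches it.

package main

import (
	"fmt"
	"time"
)

// Reproduces the durationBeforeRetry progression seen in this log:
// exponential backoff starting at 500ms, doubling per failed SetUp.
// maxDelay is an assumed cap; it is never reached in this excerpt.
func backoffSchedule(initial, maxDelay time.Duration, attempts int) []time.Duration {
	out := make([]time.Duration, 0, attempts)
	d := initial
	for i := 0; i < attempts; i++ {
		out = append(out, d)
		if d *= 2; d > maxDelay {
			d = maxDelay
		}
	}
	return out
}

func main() {
	// Prints: 500ms 1s 2s 4s 8s 16s 32s 1m4s
	for _, d := range backoffSchedule(500*time.Millisecond, 2*time.Minute, 8) {
		fmt.Print(d, " ")
	}
	fmt.Println()
}
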
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/cec6584a-fcc6-4b1d-8516-c2e6b2194048-etc-swift") pod "swift-proxy-6bc96d68cf-kbrl4" (UID: "cec6584a-fcc6-4b1d-8516-c2e6b2194048") : configmap "swift-ring-files" not found Jan 29 12:30:32 crc kubenswrapper[4753]: I0129 12:30:32.995120 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7fm9z" event={"ID":"c9067882-bffb-477d-9b76-b22f026a77d5","Type":"ContainerStarted","Data":"223abbc0dad901a5529c69ace88ca45fe7c6fb846e6b02a64865d73136ac2842"} Jan 29 12:30:33 crc kubenswrapper[4753]: I0129 12:30:33.025474 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-7fm9z" podStartSLOduration=3.669641167 podStartE2EDuration="9.025448223s" podCreationTimestamp="2026-01-29 12:30:24 +0000 UTC" firstStartedPulling="2026-01-29 12:30:26.742314463 +0000 UTC m=+1440.994395918" lastFinishedPulling="2026-01-29 12:30:32.098121529 +0000 UTC m=+1446.350202974" observedRunningTime="2026-01-29 12:30:33.015207622 +0000 UTC m=+1447.267289077" watchObservedRunningTime="2026-01-29 12:30:33.025448223 +0000 UTC m=+1447.277529678" Jan 29 12:30:33 crc kubenswrapper[4753]: I0129 12:30:33.869472 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1-etc-swift\") pod \"swift-storage-0\" (UID: \"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:30:33 crc kubenswrapper[4753]: E0129 12:30:33.869681 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found Jan 29 12:30:33 crc kubenswrapper[4753]: E0129 12:30:33.869714 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-storage-0: configmap "swift-ring-files" not found Jan 29 12:30:33 crc kubenswrapper[4753]: E0129 12:30:33.869777 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1-etc-swift podName:fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1 nodeName:}" failed. No retries permitted until 2026-01-29 12:30:41.869756838 +0000 UTC m=+1456.121838293 (durationBeforeRetry 8s). 
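
The pod_startup_latency_tracker line above for redhat-operators-7fm9z is worth decoding: podStartE2EDuration (9.025s) runs from podCreationTimestamp to watchObservedRunningTime, while podStartSLOduration (3.670s) excludes the image pull window between firstStartedPulling and lastFinishedPulling (about 5.356s). The arithmetic checks out to within the whole-second rounding of the creation timestamp; the timestamps below are transcribed from that line.

package main

import (
	"fmt"
	"time"
)

func mustParse(s string) time.Time {
	t, err := time.Parse(time.RFC3339Nano, s)
	if err != nil {
		panic(err)
	}
	return t
}

func main() {
	// Transcribed from the pod_startup_latency_tracker line above.
	created := mustParse("2026-01-29T12:30:24Z")
	firstPull := mustParse("2026-01-29T12:30:26.742314463Z")
	lastPull := mustParse("2026-01-29T12:30:32.098121529Z")
	observed := mustParse("2026-01-29T12:30:33.025448223Z")

	e2e := observed.Sub(created)         // podStartE2EDuration: 9.025448223s
	slo := e2e - lastPull.Sub(firstPull) // minus the pull window: ~3.669641s
	fmt.Println("E2E:", e2e, "SLO:", slo)
}
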
Jan 29 12:30:33 crc kubenswrapper[4753]: I0129 12:30:33.971312 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/5f186cee-89fe-4c25-b227-628ffcb2a98f-etc-swift\") pod \"swift-storage-2\" (UID: \"5f186cee-89fe-4c25-b227-628ffcb2a98f\") " pod="swift-kuttl-tests/swift-storage-2"
Jan 29 12:30:33 crc kubenswrapper[4753]: E0129 12:30:33.971499 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found
Jan 29 12:30:33 crc kubenswrapper[4753]: E0129 12:30:33.971603 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-storage-2: configmap "swift-ring-files" not found
Jan 29 12:30:33 crc kubenswrapper[4753]: E0129 12:30:33.971665 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/5f186cee-89fe-4c25-b227-628ffcb2a98f-etc-swift podName:5f186cee-89fe-4c25-b227-628ffcb2a98f nodeName:}" failed. No retries permitted until 2026-01-29 12:30:41.971648133 +0000 UTC m=+1456.223729588 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/5f186cee-89fe-4c25-b227-628ffcb2a98f-etc-swift") pod "swift-storage-2" (UID: "5f186cee-89fe-4c25-b227-628ffcb2a98f") : configmap "swift-ring-files" not found
Jan 29 12:30:33 crc kubenswrapper[4753]: E0129 12:30:33.972304 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found
Jan 29 12:30:33 crc kubenswrapper[4753]: E0129 12:30:33.972323 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-storage-1: configmap "swift-ring-files" not found
Jan 29 12:30:33 crc kubenswrapper[4753]: E0129 12:30:33.972357 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/cdf821d7-7cec-4c33-a59e-1c6493fd7281-etc-swift podName:cdf821d7-7cec-4c33-a59e-1c6493fd7281 nodeName:}" failed. No retries permitted until 2026-01-29 12:30:41.972345202 +0000 UTC m=+1456.224426667 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/cdf821d7-7cec-4c33-a59e-1c6493fd7281-etc-swift") pod "swift-storage-1" (UID: "cdf821d7-7cec-4c33-a59e-1c6493fd7281") : configmap "swift-ring-files" not found
Jan 29 12:30:33 crc kubenswrapper[4753]: I0129 12:30:33.972386 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/cdf821d7-7cec-4c33-a59e-1c6493fd7281-etc-swift\") pod \"swift-storage-1\" (UID: \"cdf821d7-7cec-4c33-a59e-1c6493fd7281\") " pod="swift-kuttl-tests/swift-storage-1"
Jan 29 12:30:34 crc kubenswrapper[4753]: I0129 12:30:34.783679 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/cec6584a-fcc6-4b1d-8516-c2e6b2194048-etc-swift\") pod \"swift-proxy-6bc96d68cf-kbrl4\" (UID: \"cec6584a-fcc6-4b1d-8516-c2e6b2194048\") " pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-kbrl4"
Jan 29 12:30:34 crc kubenswrapper[4753]: E0129 12:30:34.783871 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found
Jan 29 12:30:34 crc kubenswrapper[4753]: E0129 12:30:34.784087 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-proxy-6bc96d68cf-kbrl4: configmap "swift-ring-files" not found
Jan 29 12:30:34 crc kubenswrapper[4753]: E0129 12:30:34.784187 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/cec6584a-fcc6-4b1d-8516-c2e6b2194048-etc-swift podName:cec6584a-fcc6-4b1d-8516-c2e6b2194048 nodeName:}" failed. No retries permitted until 2026-01-29 12:30:42.784162715 +0000 UTC m=+1457.036244210 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/cec6584a-fcc6-4b1d-8516-c2e6b2194048-etc-swift") pod "swift-proxy-6bc96d68cf-kbrl4" (UID: "cec6584a-fcc6-4b1d-8516-c2e6b2194048") : configmap "swift-ring-files" not found
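Every one of the failing mounts above has the same shape: a projected volume named etc-swift whose source is the swift-ring-files ConfigMap, which does not exist yet, so projected.go cannot prepare the volume data and SetUp fails. A rough sketch of that volume definition using the upstream k8s.io/api/core/v1 types; the names are taken from the log, the surrounding pod spec is assumed:

    package main

    import (
            "fmt"

            corev1 "k8s.io/api/core/v1"
    )

    func main() {
            // the "etc-swift" projected volume as the kubelet sees it
            vol := corev1.Volume{
                    Name: "etc-swift",
                    VolumeSource: corev1.VolumeSource{
                            Projected: &corev1.ProjectedVolumeSource{
                                    Sources: []corev1.VolumeProjection{{
                                            ConfigMap: &corev1.ConfigMapProjection{
                                                    LocalObjectReference: corev1.LocalObjectReference{Name: "swift-ring-files"},
                                                    // Optional is left nil (false), so a missing
                                                    // ConfigMap makes MountVolume.SetUp fail
                                            },
                                    }},
                            },
                    },
            }
            fmt.Println(vol.Name, "<-", vol.VolumeSource.Projected.Sources[0].ConfigMap.Name)
    }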
Jan 29 12:30:35 crc kubenswrapper[4753]: I0129 12:30:35.474492 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-7fm9z"
Jan 29 12:30:35 crc kubenswrapper[4753]: I0129 12:30:35.474547 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-7fm9z"
Jan 29 12:30:36 crc kubenswrapper[4753]: I0129 12:30:36.524611 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-7fm9z" podUID="c9067882-bffb-477d-9b76-b22f026a77d5" containerName="registry-server" probeResult="failure" output=<
Jan 29 12:30:36 crc kubenswrapper[4753]: timeout: failed to connect service ":50051" within 1s
Jan 29 12:30:36 crc kubenswrapper[4753]: >
Jan 29 12:30:41 crc kubenswrapper[4753]: I0129 12:30:41.955604 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1-etc-swift\") pod \"swift-storage-0\" (UID: \"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1\") " pod="swift-kuttl-tests/swift-storage-0"
Jan 29 12:30:41 crc kubenswrapper[4753]: E0129 12:30:41.955879 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found
Jan 29 12:30:41 crc kubenswrapper[4753]: E0129 12:30:41.958269 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-storage-0: configmap "swift-ring-files" not found
Jan 29 12:30:41 crc kubenswrapper[4753]: E0129 12:30:41.958525 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1-etc-swift podName:fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1 nodeName:}" failed. No retries permitted until 2026-01-29 12:30:57.958494952 +0000 UTC m=+1472.210576407 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1-etc-swift") pod "swift-storage-0" (UID: "fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1") : configmap "swift-ring-files" not found
Jan 29 12:30:42 crc kubenswrapper[4753]: I0129 12:30:42.059274 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/cdf821d7-7cec-4c33-a59e-1c6493fd7281-etc-swift\") pod \"swift-storage-1\" (UID: \"cdf821d7-7cec-4c33-a59e-1c6493fd7281\") " pod="swift-kuttl-tests/swift-storage-1"
Jan 29 12:30:42 crc kubenswrapper[4753]: I0129 12:30:42.059335 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/5f186cee-89fe-4c25-b227-628ffcb2a98f-etc-swift\") pod \"swift-storage-2\" (UID: \"5f186cee-89fe-4c25-b227-628ffcb2a98f\") " pod="swift-kuttl-tests/swift-storage-2"
Jan 29 12:30:42 crc kubenswrapper[4753]: E0129 12:30:42.059528 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found
Jan 29 12:30:42 crc kubenswrapper[4753]: E0129 12:30:42.059547 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-storage-2: configmap "swift-ring-files" not found
Jan 29 12:30:42 crc kubenswrapper[4753]: E0129 12:30:42.059604 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/5f186cee-89fe-4c25-b227-628ffcb2a98f-etc-swift podName:5f186cee-89fe-4c25-b227-628ffcb2a98f nodeName:}" failed. No retries permitted until 2026-01-29 12:30:58.059583664 +0000 UTC m=+1472.311665119 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/5f186cee-89fe-4c25-b227-628ffcb2a98f-etc-swift") pod "swift-storage-2" (UID: "5f186cee-89fe-4c25-b227-628ffcb2a98f") : configmap "swift-ring-files" not found
Jan 29 12:30:42 crc kubenswrapper[4753]: E0129 12:30:42.059835 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found
Jan 29 12:30:42 crc kubenswrapper[4753]: E0129 12:30:42.060914 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-storage-1: configmap "swift-ring-files" not found
Jan 29 12:30:42 crc kubenswrapper[4753]: E0129 12:30:42.061213 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/cdf821d7-7cec-4c33-a59e-1c6493fd7281-etc-swift podName:cdf821d7-7cec-4c33-a59e-1c6493fd7281 nodeName:}" failed. No retries permitted until 2026-01-29 12:30:58.061162249 +0000 UTC m=+1472.313243764 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/cdf821d7-7cec-4c33-a59e-1c6493fd7281-etc-swift") pod "swift-storage-1" (UID: "cdf821d7-7cec-4c33-a59e-1c6493fd7281") : configmap "swift-ring-files" not found
Jan 29 12:30:42 crc kubenswrapper[4753]: I0129 12:30:42.870490 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/cec6584a-fcc6-4b1d-8516-c2e6b2194048-etc-swift\") pod \"swift-proxy-6bc96d68cf-kbrl4\" (UID: \"cec6584a-fcc6-4b1d-8516-c2e6b2194048\") " pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-kbrl4"
Jan 29 12:30:42 crc kubenswrapper[4753]: E0129 12:30:42.870702 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found
Jan 29 12:30:42 crc kubenswrapper[4753]: E0129 12:30:42.870890 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-proxy-6bc96d68cf-kbrl4: configmap "swift-ring-files" not found
Jan 29 12:30:42 crc kubenswrapper[4753]: E0129 12:30:42.870977 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/cec6584a-fcc6-4b1d-8516-c2e6b2194048-etc-swift podName:cec6584a-fcc6-4b1d-8516-c2e6b2194048 nodeName:}" failed. No retries permitted until 2026-01-29 12:30:58.870950042 +0000 UTC m=+1473.123031517 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/cec6584a-fcc6-4b1d-8516-c2e6b2194048-etc-swift") pod "swift-proxy-6bc96d68cf-kbrl4" (UID: "cec6584a-fcc6-4b1d-8516-c2e6b2194048") : configmap "swift-ring-files" not found
Jan 29 12:30:44 crc kubenswrapper[4753]: I0129 12:30:44.083674 4753 generic.go:334] "Generic (PLEG): container finished" podID="f284a293-aa84-46cd-bb59-b2d8e3d1762c" containerID="e2f9eb2e7c75f4680190aecbec04893b350096f32936813d869cd07a1d19acb3" exitCode=0
Jan 29 12:30:44 crc kubenswrapper[4753]: I0129 12:30:44.084026 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-ring-rebalance-pckfk" event={"ID":"f284a293-aa84-46cd-bb59-b2d8e3d1762c","Type":"ContainerDied","Data":"e2f9eb2e7c75f4680190aecbec04893b350096f32936813d869cd07a1d19acb3"}
Jan 29 12:30:45 crc kubenswrapper[4753]: I0129 12:30:45.423629 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-pckfk"
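Note the durationBeforeRetry progression for each volume above: 4s at 12:30:30, 8s at 12:30:33-34, 16s at 12:30:41-42. The kubelet doubles the delay after every failed attempt on the same volume. A minimal sketch of that doubling, assuming a cap; only the 4s/8s/16s steps are attested in this log:

    package main

    import (
            "fmt"
            "time"
    )

    func main() {
            delay := 4 * time.Second         // first observed durationBeforeRetry
            const maxDelay = 2 * time.Minute // assumed cap, not read from this log
            for i := 0; i < 6; i++ {
                    fmt.Println(delay) // 4s, 8s, 16s, 32s, 1m4s, 2m0s
                    delay *= 2
                    if delay > maxDelay {
                            delay = maxDelay
                    }
            }
    }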
Jan 29 12:30:45 crc kubenswrapper[4753]: I0129 12:30:45.589768 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-7fm9z"
Jan 29 12:30:45 crc kubenswrapper[4753]: I0129 12:30:45.633780 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-7fm9z"
Jan 29 12:30:45 crc kubenswrapper[4753]: I0129 12:30:45.646924 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/f284a293-aa84-46cd-bb59-b2d8e3d1762c-ring-data-devices\") pod \"f284a293-aa84-46cd-bb59-b2d8e3d1762c\" (UID: \"f284a293-aa84-46cd-bb59-b2d8e3d1762c\") "
Jan 29 12:30:45 crc kubenswrapper[4753]: I0129 12:30:45.647260 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/f284a293-aa84-46cd-bb59-b2d8e3d1762c-dispersionconf\") pod \"f284a293-aa84-46cd-bb59-b2d8e3d1762c\" (UID: \"f284a293-aa84-46cd-bb59-b2d8e3d1762c\") "
Jan 29 12:30:45 crc kubenswrapper[4753]: I0129 12:30:45.647496 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5rx2p\" (UniqueName: \"kubernetes.io/projected/f284a293-aa84-46cd-bb59-b2d8e3d1762c-kube-api-access-5rx2p\") pod \"f284a293-aa84-46cd-bb59-b2d8e3d1762c\" (UID: \"f284a293-aa84-46cd-bb59-b2d8e3d1762c\") "
Jan 29 12:30:45 crc kubenswrapper[4753]: I0129 12:30:45.648146 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f284a293-aa84-46cd-bb59-b2d8e3d1762c-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "f284a293-aa84-46cd-bb59-b2d8e3d1762c" (UID: "f284a293-aa84-46cd-bb59-b2d8e3d1762c"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 12:30:45 crc kubenswrapper[4753]: I0129 12:30:45.648372 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/f284a293-aa84-46cd-bb59-b2d8e3d1762c-etc-swift\") pod \"f284a293-aa84-46cd-bb59-b2d8e3d1762c\" (UID: \"f284a293-aa84-46cd-bb59-b2d8e3d1762c\") "
Jan 29 12:30:45 crc kubenswrapper[4753]: I0129 12:30:45.648703 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f284a293-aa84-46cd-bb59-b2d8e3d1762c-scripts\") pod \"f284a293-aa84-46cd-bb59-b2d8e3d1762c\" (UID: \"f284a293-aa84-46cd-bb59-b2d8e3d1762c\") "
Jan 29 12:30:45 crc kubenswrapper[4753]: I0129 12:30:45.648992 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/f284a293-aa84-46cd-bb59-b2d8e3d1762c-swiftconf\") pod \"f284a293-aa84-46cd-bb59-b2d8e3d1762c\" (UID: \"f284a293-aa84-46cd-bb59-b2d8e3d1762c\") "
Jan 29 12:30:45 crc kubenswrapper[4753]: I0129 12:30:45.649090 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f284a293-aa84-46cd-bb59-b2d8e3d1762c-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "f284a293-aa84-46cd-bb59-b2d8e3d1762c" (UID: "f284a293-aa84-46cd-bb59-b2d8e3d1762c"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 12:30:45 crc kubenswrapper[4753]: I0129 12:30:45.649885 4753 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/f284a293-aa84-46cd-bb59-b2d8e3d1762c-etc-swift\") on node \"crc\" DevicePath \"\""
Jan 29 12:30:45 crc kubenswrapper[4753]: I0129 12:30:45.649906 4753 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/f284a293-aa84-46cd-bb59-b2d8e3d1762c-ring-data-devices\") on node \"crc\" DevicePath \"\""
Jan 29 12:30:45 crc kubenswrapper[4753]: I0129 12:30:45.654797 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f284a293-aa84-46cd-bb59-b2d8e3d1762c-kube-api-access-5rx2p" (OuterVolumeSpecName: "kube-api-access-5rx2p") pod "f284a293-aa84-46cd-bb59-b2d8e3d1762c" (UID: "f284a293-aa84-46cd-bb59-b2d8e3d1762c"). InnerVolumeSpecName "kube-api-access-5rx2p". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 12:30:45 crc kubenswrapper[4753]: I0129 12:30:45.676193 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f284a293-aa84-46cd-bb59-b2d8e3d1762c-scripts" (OuterVolumeSpecName: "scripts") pod "f284a293-aa84-46cd-bb59-b2d8e3d1762c" (UID: "f284a293-aa84-46cd-bb59-b2d8e3d1762c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 12:30:45 crc kubenswrapper[4753]: I0129 12:30:45.678089 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f284a293-aa84-46cd-bb59-b2d8e3d1762c-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "f284a293-aa84-46cd-bb59-b2d8e3d1762c" (UID: "f284a293-aa84-46cd-bb59-b2d8e3d1762c"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 12:30:45 crc kubenswrapper[4753]: I0129 12:30:45.680826 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f284a293-aa84-46cd-bb59-b2d8e3d1762c-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "f284a293-aa84-46cd-bb59-b2d8e3d1762c" (UID: "f284a293-aa84-46cd-bb59-b2d8e3d1762c"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 12:30:45 crc kubenswrapper[4753]: I0129 12:30:45.750856 4753 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f284a293-aa84-46cd-bb59-b2d8e3d1762c-scripts\") on node \"crc\" DevicePath \"\""
Jan 29 12:30:45 crc kubenswrapper[4753]: I0129 12:30:45.750900 4753 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/f284a293-aa84-46cd-bb59-b2d8e3d1762c-swiftconf\") on node \"crc\" DevicePath \"\""
Jan 29 12:30:45 crc kubenswrapper[4753]: I0129 12:30:45.750912 4753 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/f284a293-aa84-46cd-bb59-b2d8e3d1762c-dispersionconf\") on node \"crc\" DevicePath \"\""
Jan 29 12:30:45 crc kubenswrapper[4753]: I0129 12:30:45.750937 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5rx2p\" (UniqueName: \"kubernetes.io/projected/f284a293-aa84-46cd-bb59-b2d8e3d1762c-kube-api-access-5rx2p\") on node \"crc\" DevicePath \"\""
Jan 29 12:30:45 crc kubenswrapper[4753]: I0129 12:30:45.831827 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-7fm9z"]
Jan 29 12:30:46 crc kubenswrapper[4753]: I0129 12:30:46.113931 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-ring-rebalance-pckfk" event={"ID":"f284a293-aa84-46cd-bb59-b2d8e3d1762c","Type":"ContainerDied","Data":"0ca1933805d489a197171e218f7ad10ca96156ed894359a33efcf181de1e40b7"}
Jan 29 12:30:46 crc kubenswrapper[4753]: I0129 12:30:46.113986 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-pckfk"
Jan 29 12:30:46 crc kubenswrapper[4753]: I0129 12:30:46.114031 4753 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0ca1933805d489a197171e218f7ad10ca96156ed894359a33efcf181de1e40b7"
Jan 29 12:30:47 crc kubenswrapper[4753]: I0129 12:30:47.123934 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-7fm9z" podUID="c9067882-bffb-477d-9b76-b22f026a77d5" containerName="registry-server" containerID="cri-o://223abbc0dad901a5529c69ace88ca45fe7c6fb846e6b02a64865d73136ac2842" gracePeriod=2
Jan 29 12:30:48 crc kubenswrapper[4753]: I0129 12:30:48.136025 4753 generic.go:334] "Generic (PLEG): container finished" podID="c9067882-bffb-477d-9b76-b22f026a77d5" containerID="223abbc0dad901a5529c69ace88ca45fe7c6fb846e6b02a64865d73136ac2842" exitCode=0
Jan 29 12:30:48 crc kubenswrapper[4753]: I0129 12:30:48.136091 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7fm9z" event={"ID":"c9067882-bffb-477d-9b76-b22f026a77d5","Type":"ContainerDied","Data":"223abbc0dad901a5529c69ace88ca45fe7c6fb846e6b02a64865d73136ac2842"}
Jan 29 12:30:48 crc kubenswrapper[4753]: I0129 12:30:48.253036 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7fm9z"
Jan 29 12:30:48 crc kubenswrapper[4753]: I0129 12:30:48.300992 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lw6n8\" (UniqueName: \"kubernetes.io/projected/c9067882-bffb-477d-9b76-b22f026a77d5-kube-api-access-lw6n8\") pod \"c9067882-bffb-477d-9b76-b22f026a77d5\" (UID: \"c9067882-bffb-477d-9b76-b22f026a77d5\") "
Jan 29 12:30:48 crc kubenswrapper[4753]: I0129 12:30:48.301048 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c9067882-bffb-477d-9b76-b22f026a77d5-utilities\") pod \"c9067882-bffb-477d-9b76-b22f026a77d5\" (UID: \"c9067882-bffb-477d-9b76-b22f026a77d5\") "
Jan 29 12:30:48 crc kubenswrapper[4753]: I0129 12:30:48.304338 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c9067882-bffb-477d-9b76-b22f026a77d5-utilities" (OuterVolumeSpecName: "utilities") pod "c9067882-bffb-477d-9b76-b22f026a77d5" (UID: "c9067882-bffb-477d-9b76-b22f026a77d5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 12:30:48 crc kubenswrapper[4753]: I0129 12:30:48.304572 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c9067882-bffb-477d-9b76-b22f026a77d5-catalog-content\") pod \"c9067882-bffb-477d-9b76-b22f026a77d5\" (UID: \"c9067882-bffb-477d-9b76-b22f026a77d5\") "
Jan 29 12:30:48 crc kubenswrapper[4753]: I0129 12:30:48.307078 4753 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c9067882-bffb-477d-9b76-b22f026a77d5-utilities\") on node \"crc\" DevicePath \"\""
Jan 29 12:30:48 crc kubenswrapper[4753]: I0129 12:30:48.313678 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c9067882-bffb-477d-9b76-b22f026a77d5-kube-api-access-lw6n8" (OuterVolumeSpecName: "kube-api-access-lw6n8") pod "c9067882-bffb-477d-9b76-b22f026a77d5" (UID: "c9067882-bffb-477d-9b76-b22f026a77d5"). InnerVolumeSpecName "kube-api-access-lw6n8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 12:30:48 crc kubenswrapper[4753]: I0129 12:30:48.409650 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lw6n8\" (UniqueName: \"kubernetes.io/projected/c9067882-bffb-477d-9b76-b22f026a77d5-kube-api-access-lw6n8\") on node \"crc\" DevicePath \"\""
Jan 29 12:30:48 crc kubenswrapper[4753]: I0129 12:30:48.437531 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c9067882-bffb-477d-9b76-b22f026a77d5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c9067882-bffb-477d-9b76-b22f026a77d5" (UID: "c9067882-bffb-477d-9b76-b22f026a77d5"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 12:30:48 crc kubenswrapper[4753]: I0129 12:30:48.510939 4753 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c9067882-bffb-477d-9b76-b22f026a77d5-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 29 12:30:49 crc kubenswrapper[4753]: I0129 12:30:49.145517 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7fm9z" event={"ID":"c9067882-bffb-477d-9b76-b22f026a77d5","Type":"ContainerDied","Data":"c58f606d1815686cc3c301c4fcc938b5b41f6d2102ed43946efdc23bc02cddc6"}
Jan 29 12:30:49 crc kubenswrapper[4753]: I0129 12:30:49.145563 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7fm9z"
Jan 29 12:30:49 crc kubenswrapper[4753]: I0129 12:30:49.145917 4753 scope.go:117] "RemoveContainer" containerID="223abbc0dad901a5529c69ace88ca45fe7c6fb846e6b02a64865d73136ac2842"
Jan 29 12:30:49 crc kubenswrapper[4753]: I0129 12:30:49.170020 4753 scope.go:117] "RemoveContainer" containerID="078739e1e73bbea27a8ad59f6cc1d174a7d4cc79026e595e70d7ffd544ace1a0"
Jan 29 12:30:49 crc kubenswrapper[4753]: I0129 12:30:49.183432 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-7fm9z"]
Jan 29 12:30:49 crc kubenswrapper[4753]: I0129 12:30:49.187904 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-7fm9z"]
Jan 29 12:30:49 crc kubenswrapper[4753]: I0129 12:30:49.215054 4753 scope.go:117] "RemoveContainer" containerID="15177b2346f85d0d00160d94ae5f021509bb804799a6f5c43364f6e5fe9ff0f0"
Jan 29 12:30:49 crc kubenswrapper[4753]: I0129 12:30:49.898052 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c9067882-bffb-477d-9b76-b22f026a77d5" path="/var/lib/kubelet/pods/c9067882-bffb-477d-9b76-b22f026a77d5/volumes"
Jan 29 12:30:57 crc kubenswrapper[4753]: I0129 12:30:57.977735 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1-etc-swift\") pod \"swift-storage-0\" (UID: \"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1\") " pod="swift-kuttl-tests/swift-storage-0"
Jan 29 12:30:57 crc kubenswrapper[4753]: I0129 12:30:57.992010 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1-etc-swift\") pod \"swift-storage-0\" (UID: \"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1\") " pod="swift-kuttl-tests/swift-storage-0"
Jan 29 12:30:58 crc kubenswrapper[4753]: I0129 12:30:58.081680 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/cdf821d7-7cec-4c33-a59e-1c6493fd7281-etc-swift\") pod \"swift-storage-1\" (UID: \"cdf821d7-7cec-4c33-a59e-1c6493fd7281\") " pod="swift-kuttl-tests/swift-storage-1"
Jan 29 12:30:58 crc kubenswrapper[4753]: I0129 12:30:58.081861 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/5f186cee-89fe-4c25-b227-628ffcb2a98f-etc-swift\") pod \"swift-storage-2\" (UID: \"5f186cee-89fe-4c25-b227-628ffcb2a98f\") " pod="swift-kuttl-tests/swift-storage-2"
Jan 29 12:30:58 crc kubenswrapper[4753]: I0129 12:30:58.087166 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/cdf821d7-7cec-4c33-a59e-1c6493fd7281-etc-swift\") pod \"swift-storage-1\" (UID: \"cdf821d7-7cec-4c33-a59e-1c6493fd7281\") " pod="swift-kuttl-tests/swift-storage-1"
Jan 29 12:30:58 crc kubenswrapper[4753]: I0129 12:30:58.087538 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/5f186cee-89fe-4c25-b227-628ffcb2a98f-etc-swift\") pod \"swift-storage-2\" (UID: \"5f186cee-89fe-4c25-b227-628ffcb2a98f\") " pod="swift-kuttl-tests/swift-storage-2"
Jan 29 12:30:58 crc kubenswrapper[4753]: I0129 12:30:58.090258 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-storage-0"
Jan 29 12:30:58 crc kubenswrapper[4753]: I0129 12:30:58.185915 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-storage-2"
Jan 29 12:30:58 crc kubenswrapper[4753]: I0129 12:30:58.206521 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-storage-1"
Jan 29 12:30:58 crc kubenswrapper[4753]: I0129 12:30:58.539788 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-storage-0"]
Jan 29 12:30:58 crc kubenswrapper[4753]: W0129 12:30:58.551128 4753 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfb9fbcbf_3cdc_449f_9754_7d54c5f17aa1.slice/crio-4f15ddd8fa257ee30591ad3a5ca43ecefbf5d5a4f7894aff28107a76d04f08d1 WatchSource:0}: Error finding container 4f15ddd8fa257ee30591ad3a5ca43ecefbf5d5a4f7894aff28107a76d04f08d1: Status 404 returned error can't find the container with id 4f15ddd8fa257ee30591ad3a5ca43ecefbf5d5a4f7894aff28107a76d04f08d1
Jan 29 12:30:58 crc kubenswrapper[4753]: I0129 12:30:58.681250 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-storage-2"]
Jan 29 12:30:58 crc kubenswrapper[4753]: W0129 12:30:58.692761 4753 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5f186cee_89fe_4c25_b227_628ffcb2a98f.slice/crio-c07f9789194850b328b4afa31d64c118db797aea81cbec08e7d72941358f62f3 WatchSource:0}: Error finding container c07f9789194850b328b4afa31d64c118db797aea81cbec08e7d72941358f62f3: Status 404 returned error can't find the container with id c07f9789194850b328b4afa31d64c118db797aea81cbec08e7d72941358f62f3
Jan 29 12:30:58 crc kubenswrapper[4753]: I0129 12:30:58.766269 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-storage-1"]
Jan 29 12:30:58 crc kubenswrapper[4753]: I0129 12:30:58.900032 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/cec6584a-fcc6-4b1d-8516-c2e6b2194048-etc-swift\") pod \"swift-proxy-6bc96d68cf-kbrl4\" (UID: \"cec6584a-fcc6-4b1d-8516-c2e6b2194048\") " pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-kbrl4"
Jan 29 12:30:58 crc kubenswrapper[4753]: I0129 12:30:58.913453 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/cec6584a-fcc6-4b1d-8516-c2e6b2194048-etc-swift\") pod \"swift-proxy-6bc96d68cf-kbrl4\" (UID: \"cec6584a-fcc6-4b1d-8516-c2e6b2194048\") " pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-kbrl4"
Jan 29 12:30:58 crc kubenswrapper[4753]: I0129 12:30:58.974582 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-kbrl4"
Jan 29 12:30:59 crc kubenswrapper[4753]: I0129 12:30:59.262995 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"5f186cee-89fe-4c25-b227-628ffcb2a98f","Type":"ContainerStarted","Data":"8ade60df808d454198934fc448217bcca19a3633d29a12c5a706cb873f806811"}
Jan 29 12:30:59 crc kubenswrapper[4753]: I0129 12:30:59.264446 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"5f186cee-89fe-4c25-b227-628ffcb2a98f","Type":"ContainerStarted","Data":"1737e0ba3b3d084614c0a7a06cc57cad8d732ad8ed13e0c65e0965dfefef1d63"}
Jan 29 12:30:59 crc kubenswrapper[4753]: I0129 12:30:59.264467 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"5f186cee-89fe-4c25-b227-628ffcb2a98f","Type":"ContainerStarted","Data":"c07f9789194850b328b4afa31d64c118db797aea81cbec08e7d72941358f62f3"}
Jan 29 12:30:59 crc kubenswrapper[4753]: I0129 12:30:59.265841 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1","Type":"ContainerStarted","Data":"1661471db32399586faa01821a0cd7e36b5025a57e9a8d4f08b6a0cdc8302d89"}
Jan 29 12:30:59 crc kubenswrapper[4753]: I0129 12:30:59.265907 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1","Type":"ContainerStarted","Data":"4bf0d941158a40c1b8708f56a8bbbbd8f371ac2b901f4843bcdc628feb29b830"}
Jan 29 12:30:59 crc kubenswrapper[4753]: I0129 12:30:59.265923 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1","Type":"ContainerStarted","Data":"26ced7e34171d6bfce760564034338cc137c0fec95e21a1eda4b638ce903778b"}
Jan 29 12:30:59 crc kubenswrapper[4753]: I0129 12:30:59.265935 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1","Type":"ContainerStarted","Data":"ebbaa821837a219076c7e5eaecb2900a7f98b03a800d03b39a05eb1bc69709ae"}
Jan 29 12:30:59 crc kubenswrapper[4753]: I0129 12:30:59.265947 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1","Type":"ContainerStarted","Data":"4f15ddd8fa257ee30591ad3a5ca43ecefbf5d5a4f7894aff28107a76d04f08d1"}
Jan 29 12:30:59 crc kubenswrapper[4753]: I0129 12:30:59.268533 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"cdf821d7-7cec-4c33-a59e-1c6493fd7281","Type":"ContainerStarted","Data":"54791b5e4f8e371f9007a17143cbc5838793bd8d3e1ce65c2822161e12d6146c"}
Jan 29 12:30:59 crc kubenswrapper[4753]: I0129 12:30:59.268569 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"cdf821d7-7cec-4c33-a59e-1c6493fd7281","Type":"ContainerStarted","Data":"f0eddfb27e0b1ca03ff86905f0d1b81d674be9160c615a4f016f519d650869e7"}
Jan 29 12:30:59 crc kubenswrapper[4753]: I0129 12:30:59.268583 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"cdf821d7-7cec-4c33-a59e-1c6493fd7281","Type":"ContainerStarted","Data":"98865a6d1c05bb7d82e89d85309da0a159d575dfadab0935076448e89d09eb7c"}
Jan 29 12:30:59 crc kubenswrapper[4753]: I0129 12:30:59.428949 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-proxy-6bc96d68cf-kbrl4"]
Jan 29 12:30:59 crc kubenswrapper[4753]: W0129 12:30:59.432768 4753 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcec6584a_fcc6_4b1d_8516_c2e6b2194048.slice/crio-447e090cd54397217cafbcf93cea61a90bcf39c3150f7863a7d006cb27b16131 WatchSource:0}: Error finding container 447e090cd54397217cafbcf93cea61a90bcf39c3150f7863a7d006cb27b16131: Status 404 returned error can't find the container with id 447e090cd54397217cafbcf93cea61a90bcf39c3150f7863a7d006cb27b16131
Jan 29 12:31:00 crc kubenswrapper[4753]: I0129 12:31:00.289302 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"cdf821d7-7cec-4c33-a59e-1c6493fd7281","Type":"ContainerStarted","Data":"49988160277c5cc45dd8900eb16d03199ec0c4ffa40e5904a5dbb7b9d2078f17"}
Jan 29 12:31:00 crc kubenswrapper[4753]: I0129 12:31:00.289583 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"cdf821d7-7cec-4c33-a59e-1c6493fd7281","Type":"ContainerStarted","Data":"6d62737aaefa0ae329ab69794e4a09fdbf4a1956c0ade81f0a19a89a0f563158"}
Jan 29 12:31:00 crc kubenswrapper[4753]: I0129 12:31:00.289774 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"cdf821d7-7cec-4c33-a59e-1c6493fd7281","Type":"ContainerStarted","Data":"b028c59c7810639143318f825119fe54eacc1a661cb573dc2455ed7e2ba850ed"}
Jan 29 12:31:00 crc kubenswrapper[4753]: I0129 12:31:00.293760 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-kbrl4" event={"ID":"cec6584a-fcc6-4b1d-8516-c2e6b2194048","Type":"ContainerStarted","Data":"3c03eaf094d75e935a3ac244d63b044ef319a92959b6f9fd416b8a3dbdbcc0da"}
Jan 29 12:31:00 crc kubenswrapper[4753]: I0129 12:31:00.293813 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-kbrl4" event={"ID":"cec6584a-fcc6-4b1d-8516-c2e6b2194048","Type":"ContainerStarted","Data":"bdc2ce23aa0b4c724f557767c41572887c03d3fefc7a47884d3282150817b6d9"}
Jan 29 12:31:00 crc kubenswrapper[4753]: I0129 12:31:00.293828 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-kbrl4" event={"ID":"cec6584a-fcc6-4b1d-8516-c2e6b2194048","Type":"ContainerStarted","Data":"447e090cd54397217cafbcf93cea61a90bcf39c3150f7863a7d006cb27b16131"}
Jan 29 12:31:00 crc kubenswrapper[4753]: I0129 12:31:00.294634 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-kbrl4"
Jan 29 12:31:00 crc kubenswrapper[4753]: I0129 12:31:00.294689 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-kbrl4"
Jan 29 12:31:00 crc kubenswrapper[4753]: I0129 12:31:00.326712 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-kbrl4" podStartSLOduration=34.326671297 podStartE2EDuration="34.326671297s" podCreationTimestamp="2026-01-29 12:30:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:31:00.326188093 +0000 UTC m=+1474.578269548" watchObservedRunningTime="2026-01-29 12:31:00.326671297 +0000 UTC m=+1474.578752752"
Jan 29 12:31:00 crc kubenswrapper[4753]: I0129 12:31:00.341688 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"5f186cee-89fe-4c25-b227-628ffcb2a98f","Type":"ContainerStarted","Data":"056e27f46aca1e39fcb6267b02e6b8748de9f6836da7bd55a0b5148163a59cdf"}
Jan 29 12:31:00 crc kubenswrapper[4753]: I0129 12:31:00.341741 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"5f186cee-89fe-4c25-b227-628ffcb2a98f","Type":"ContainerStarted","Data":"3705c5801c3eba8ddc1d2890a6acdfcdf4c62e5da07c136d125988be1e360884"}
Jan 29 12:31:00 crc kubenswrapper[4753]: I0129 12:31:00.341751 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"5f186cee-89fe-4c25-b227-628ffcb2a98f","Type":"ContainerStarted","Data":"9f7afb748e128f506597d0e087b167fcba9224b8b7016f8ee496a4591b0e162c"}
Jan 29 12:31:00 crc kubenswrapper[4753]: I0129 12:31:00.341760 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"5f186cee-89fe-4c25-b227-628ffcb2a98f","Type":"ContainerStarted","Data":"31d6993374213dd3b7e564d0a4085cc39908df3ddaf6e54182f707a289ddab96"}
Jan 29 12:31:00 crc kubenswrapper[4753]: I0129 12:31:00.347520 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1","Type":"ContainerStarted","Data":"863f0dd428950d22ff5bb1d391e9d5b81ac1a1cbbe4e695d9602a641d07a1a22"}
Jan 29 12:31:00 crc kubenswrapper[4753]: I0129 12:31:00.347569 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1","Type":"ContainerStarted","Data":"b75616dd1d2571d9087cbb56f8d01e12a0d244229467ec074a4010fb12da21f3"}
Jan 29 12:31:00 crc kubenswrapper[4753]: I0129 12:31:00.347582 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1","Type":"ContainerStarted","Data":"0f829cdb3721a769b92e78700efd5425645fe4eed5ead53148401905c22bb46d"}
Jan 29 12:31:01 crc kubenswrapper[4753]: I0129 12:31:01.361631 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"cdf821d7-7cec-4c33-a59e-1c6493fd7281","Type":"ContainerStarted","Data":"7375ec7e5486a51f1304b60d7e06caa5a53e836b083d9d627059439991053ffd"}
Jan 29 12:31:01 crc kubenswrapper[4753]: I0129 12:31:01.361697 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"cdf821d7-7cec-4c33-a59e-1c6493fd7281","Type":"ContainerStarted","Data":"0aec20004c67a50d19a5c8d1cf7ed9018b4b95c39a5978455971cdd8179addd1"}
Jan 29 12:31:01 crc kubenswrapper[4753]: I0129 12:31:01.361710 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"cdf821d7-7cec-4c33-a59e-1c6493fd7281","Type":"ContainerStarted","Data":"3d01ab550711d3f6ed3858797bfec7a7069bc75766d3c545e26de6dea09f5fb8"}
Jan 29 12:31:01 crc kubenswrapper[4753]: I0129 12:31:01.361719 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"cdf821d7-7cec-4c33-a59e-1c6493fd7281","Type":"ContainerStarted","Data":"512e69e3786e84e63a587d8d9745a6408e329502de45c30a1be8a2813fce175e"}
Jan 29 12:31:01 crc kubenswrapper[4753]: I0129 12:31:01.367885 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"5f186cee-89fe-4c25-b227-628ffcb2a98f","Type":"ContainerStarted","Data":"b1ec4dfcf68549610147c8cd4d5995a51e66768570c5c1fdb817858b572a2aef"}
event={"ID":"5f186cee-89fe-4c25-b227-628ffcb2a98f","Type":"ContainerStarted","Data":"b1ec4dfcf68549610147c8cd4d5995a51e66768570c5c1fdb817858b572a2aef"} Jan 29 12:31:01 crc kubenswrapper[4753]: I0129 12:31:01.367970 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"5f186cee-89fe-4c25-b227-628ffcb2a98f","Type":"ContainerStarted","Data":"824a9ea0f9ab12597c31967bb66834f5b783df0905b01089fcbd3905b858c483"} Jan 29 12:31:01 crc kubenswrapper[4753]: I0129 12:31:01.368010 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"5f186cee-89fe-4c25-b227-628ffcb2a98f","Type":"ContainerStarted","Data":"d112d59361aa4821a24ce857e58ec6ead1b63c9892c956f350e18f52f9e10e65"} Jan 29 12:31:01 crc kubenswrapper[4753]: I0129 12:31:01.368025 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"5f186cee-89fe-4c25-b227-628ffcb2a98f","Type":"ContainerStarted","Data":"3225ffbf14a2f46af26051f69cf072841186d28c18bf03982b6718349f799de8"} Jan 29 12:31:01 crc kubenswrapper[4753]: I0129 12:31:01.381096 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1","Type":"ContainerStarted","Data":"e1480ad953e1453491bfbacc844a038d09fdd5a675fd4ec79be9f3f750ee72ae"} Jan 29 12:31:01 crc kubenswrapper[4753]: I0129 12:31:01.381262 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1","Type":"ContainerStarted","Data":"f2e02b2ab58bd62d93b270812546264b3e9210ccac10d6b3ed2c1080f1dba78d"} Jan 29 12:31:01 crc kubenswrapper[4753]: I0129 12:31:01.381283 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1","Type":"ContainerStarted","Data":"fea50e28f0174fd91362c9abb3b729cc30c5f58c114ab482b2127002cf3c630b"} Jan 29 12:31:01 crc kubenswrapper[4753]: I0129 12:31:01.381296 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1","Type":"ContainerStarted","Data":"21a68569c06ad75010210c795bc28381cfc8966ffbce36fdbe1e636ab5477089"} Jan 29 12:31:02 crc kubenswrapper[4753]: I0129 12:31:02.391961 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"cdf821d7-7cec-4c33-a59e-1c6493fd7281","Type":"ContainerStarted","Data":"e0df4d24defb12bbb6148384cb20d00499a3423ed6132f66831d8d0399b66e27"} Jan 29 12:31:02 crc kubenswrapper[4753]: I0129 12:31:02.392011 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"cdf821d7-7cec-4c33-a59e-1c6493fd7281","Type":"ContainerStarted","Data":"16c189b240c3b6af0b5143c94f27ad8f278cf55c71fde6ff59b1123203d274ec"} Jan 29 12:31:02 crc kubenswrapper[4753]: I0129 12:31:02.392022 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"cdf821d7-7cec-4c33-a59e-1c6493fd7281","Type":"ContainerStarted","Data":"d29a9d10061279a2805ca6403127e650b1bbfe36a4163df4e079fae54c5a5970"} Jan 29 12:31:02 crc kubenswrapper[4753]: I0129 12:31:02.392030 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" 
event={"ID":"cdf821d7-7cec-4c33-a59e-1c6493fd7281","Type":"ContainerStarted","Data":"4f9f3bd4b5dcda78683a4fd9f6169c0ba4cacc4f6a05deb5fb5b67519e1959ef"} Jan 29 12:31:02 crc kubenswrapper[4753]: I0129 12:31:02.398424 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"5f186cee-89fe-4c25-b227-628ffcb2a98f","Type":"ContainerStarted","Data":"0a2445c3569e08520140f26ebf5205ae7e804cd6709b85f2a1fb89ce86ea5674"} Jan 29 12:31:02 crc kubenswrapper[4753]: I0129 12:31:02.398477 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"5f186cee-89fe-4c25-b227-628ffcb2a98f","Type":"ContainerStarted","Data":"cdf458faa8e41f1db5e8106107119c792aea631dcda870cad66d95e8a74440aa"} Jan 29 12:31:02 crc kubenswrapper[4753]: I0129 12:31:02.398489 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"5f186cee-89fe-4c25-b227-628ffcb2a98f","Type":"ContainerStarted","Data":"50d6ac3c69c7caed60fbf1e5bb69f1a917f89479ed3e932b76121dae17629596"} Jan 29 12:31:02 crc kubenswrapper[4753]: I0129 12:31:02.398500 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"5f186cee-89fe-4c25-b227-628ffcb2a98f","Type":"ContainerStarted","Data":"afe9540d6c8487275d7527ca39a1a61852a5605ffc987d75def8883cdd9c9d91"} Jan 29 12:31:02 crc kubenswrapper[4753]: I0129 12:31:02.398511 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"5f186cee-89fe-4c25-b227-628ffcb2a98f","Type":"ContainerStarted","Data":"c59dfabc0782c5e45570ebd81541d4892c4494e9a05b790d9d0166ca98e35355"} Jan 29 12:31:02 crc kubenswrapper[4753]: I0129 12:31:02.405000 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1","Type":"ContainerStarted","Data":"b8020c59d568fbfdd9bf91a35ad05830bd19b500dc075b3d4bc3eda195246fab"} Jan 29 12:31:02 crc kubenswrapper[4753]: I0129 12:31:02.405044 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1","Type":"ContainerStarted","Data":"2aa766e3f776a05fb581793f3b320cd526d9395e8e328f981b1cb5007225f32f"} Jan 29 12:31:02 crc kubenswrapper[4753]: I0129 12:31:02.405055 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1","Type":"ContainerStarted","Data":"ae14f26fca5ecff409fc265c6b8752e73c7ecc71d20ff281e328fa1fc8e808fd"} Jan 29 12:31:02 crc kubenswrapper[4753]: I0129 12:31:02.405064 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1","Type":"ContainerStarted","Data":"da95de997017674f5f725d2434c76b898c8454d351d2ae0c048009fecac2a70b"} Jan 29 12:31:02 crc kubenswrapper[4753]: I0129 12:31:02.446524 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="swift-kuttl-tests/swift-storage-2" podStartSLOduration=38.446499747 podStartE2EDuration="38.446499747s" podCreationTimestamp="2026-01-29 12:30:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:31:02.443629916 +0000 UTC m=+1476.695711391" watchObservedRunningTime="2026-01-29 12:31:02.446499747 +0000 UTC m=+1476.698581202" Jan 29 12:31:02 crc kubenswrapper[4753]: 
I0129 12:31:02.499463 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="swift-kuttl-tests/swift-storage-0" podStartSLOduration=38.499435671 podStartE2EDuration="38.499435671s" podCreationTimestamp="2026-01-29 12:30:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:31:02.489620842 +0000 UTC m=+1476.741702297" watchObservedRunningTime="2026-01-29 12:31:02.499435671 +0000 UTC m=+1476.751517126" Jan 29 12:31:03 crc kubenswrapper[4753]: I0129 12:31:03.421108 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"cdf821d7-7cec-4c33-a59e-1c6493fd7281","Type":"ContainerStarted","Data":"e93a1c2eebca268c75cadde6e248adb908e05fb31946984b83eff825afbd1777"} Jan 29 12:31:03 crc kubenswrapper[4753]: I0129 12:31:03.421193 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"cdf821d7-7cec-4c33-a59e-1c6493fd7281","Type":"ContainerStarted","Data":"194bd65bd1293b044532e09dbd55bdc880c52603b4ee58046654673e07e25e17"} Jan 29 12:31:03 crc kubenswrapper[4753]: I0129 12:31:03.464609 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="swift-kuttl-tests/swift-storage-1" podStartSLOduration=39.464580069 podStartE2EDuration="39.464580069s" podCreationTimestamp="2026-01-29 12:30:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:31:03.455823301 +0000 UTC m=+1477.707904776" watchObservedRunningTime="2026-01-29 12:31:03.464580069 +0000 UTC m=+1477.716661524" Jan 29 12:31:08 crc kubenswrapper[4753]: I0129 12:31:08.978483 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-kbrl4" Jan 29 12:31:08 crc kubenswrapper[4753]: I0129 12:31:08.979353 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-kbrl4" Jan 29 12:31:10 crc kubenswrapper[4753]: I0129 12:31:10.554343 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-debug-prwvt"] Jan 29 12:31:10 crc kubenswrapper[4753]: E0129 12:31:10.555348 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f284a293-aa84-46cd-bb59-b2d8e3d1762c" containerName="swift-ring-rebalance" Jan 29 12:31:10 crc kubenswrapper[4753]: I0129 12:31:10.555407 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="f284a293-aa84-46cd-bb59-b2d8e3d1762c" containerName="swift-ring-rebalance" Jan 29 12:31:10 crc kubenswrapper[4753]: E0129 12:31:10.555488 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9067882-bffb-477d-9b76-b22f026a77d5" containerName="extract-content" Jan 29 12:31:10 crc kubenswrapper[4753]: I0129 12:31:10.555499 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9067882-bffb-477d-9b76-b22f026a77d5" containerName="extract-content" Jan 29 12:31:10 crc kubenswrapper[4753]: E0129 12:31:10.555514 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9067882-bffb-477d-9b76-b22f026a77d5" containerName="registry-server" Jan 29 12:31:10 crc kubenswrapper[4753]: I0129 12:31:10.555523 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9067882-bffb-477d-9b76-b22f026a77d5" containerName="registry-server" Jan 29 12:31:10 crc kubenswrapper[4753]: E0129 12:31:10.555537 4753 cpu_manager.go:410] 
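For the swift-storage pods above, firstStartedPulling and lastFinishedPulling are the zero time (no image pull happened), so podStartSLOduration equals podStartE2EDuration, which is just watchObservedRunningTime minus podCreationTimestamp. A quick check with the swift-storage-0 values copied from the entry above:

    package main

    import (
            "fmt"
            "time"
    )

    func main() {
            const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
            created, err := time.Parse(layout, "2026-01-29 12:30:24 +0000 UTC")
            if err != nil {
                    panic(err)
            }
            running, err := time.Parse(layout, "2026-01-29 12:31:02.499435671 +0000 UTC")
            if err != nil {
                    panic(err)
            }
            fmt.Println(running.Sub(created)) // 38.499435671s, matching podStartE2EDuration
    }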
"RemoveStaleState: removing container" podUID="c9067882-bffb-477d-9b76-b22f026a77d5" containerName="extract-utilities" Jan 29 12:31:10 crc kubenswrapper[4753]: I0129 12:31:10.555545 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9067882-bffb-477d-9b76-b22f026a77d5" containerName="extract-utilities" Jan 29 12:31:10 crc kubenswrapper[4753]: I0129 12:31:10.555836 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="c9067882-bffb-477d-9b76-b22f026a77d5" containerName="registry-server" Jan 29 12:31:10 crc kubenswrapper[4753]: I0129 12:31:10.555862 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="f284a293-aa84-46cd-bb59-b2d8e3d1762c" containerName="swift-ring-rebalance" Jan 29 12:31:10 crc kubenswrapper[4753]: I0129 12:31:10.556840 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-debug-prwvt" Jan 29 12:31:10 crc kubenswrapper[4753]: I0129 12:31:10.559670 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"swift-kuttl-tests"/"swift-ring-config-data" Jan 29 12:31:10 crc kubenswrapper[4753]: I0129 12:31:10.560385 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"swift-kuttl-tests"/"swift-ring-scripts" Jan 29 12:31:10 crc kubenswrapper[4753]: I0129 12:31:10.572489 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-debug-prwvt"] Jan 29 12:31:10 crc kubenswrapper[4753]: I0129 12:31:10.590481 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/dbcf77ee-0621-4095-83f7-31ea04a5daa0-scripts\") pod \"swift-ring-rebalance-debug-prwvt\" (UID: \"dbcf77ee-0621-4095-83f7-31ea04a5daa0\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-prwvt" Jan 29 12:31:10 crc kubenswrapper[4753]: I0129 12:31:10.590546 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/dbcf77ee-0621-4095-83f7-31ea04a5daa0-dispersionconf\") pod \"swift-ring-rebalance-debug-prwvt\" (UID: \"dbcf77ee-0621-4095-83f7-31ea04a5daa0\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-prwvt" Jan 29 12:31:10 crc kubenswrapper[4753]: I0129 12:31:10.590589 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ndmqf\" (UniqueName: \"kubernetes.io/projected/dbcf77ee-0621-4095-83f7-31ea04a5daa0-kube-api-access-ndmqf\") pod \"swift-ring-rebalance-debug-prwvt\" (UID: \"dbcf77ee-0621-4095-83f7-31ea04a5daa0\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-prwvt" Jan 29 12:31:10 crc kubenswrapper[4753]: I0129 12:31:10.590635 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/dbcf77ee-0621-4095-83f7-31ea04a5daa0-ring-data-devices\") pod \"swift-ring-rebalance-debug-prwvt\" (UID: \"dbcf77ee-0621-4095-83f7-31ea04a5daa0\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-prwvt" Jan 29 12:31:10 crc kubenswrapper[4753]: I0129 12:31:10.590654 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/dbcf77ee-0621-4095-83f7-31ea04a5daa0-swiftconf\") pod \"swift-ring-rebalance-debug-prwvt\" (UID: \"dbcf77ee-0621-4095-83f7-31ea04a5daa0\") " 
pod="swift-kuttl-tests/swift-ring-rebalance-debug-prwvt" Jan 29 12:31:10 crc kubenswrapper[4753]: I0129 12:31:10.590680 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/dbcf77ee-0621-4095-83f7-31ea04a5daa0-etc-swift\") pod \"swift-ring-rebalance-debug-prwvt\" (UID: \"dbcf77ee-0621-4095-83f7-31ea04a5daa0\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-prwvt" Jan 29 12:31:10 crc kubenswrapper[4753]: I0129 12:31:10.692016 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/dbcf77ee-0621-4095-83f7-31ea04a5daa0-dispersionconf\") pod \"swift-ring-rebalance-debug-prwvt\" (UID: \"dbcf77ee-0621-4095-83f7-31ea04a5daa0\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-prwvt" Jan 29 12:31:10 crc kubenswrapper[4753]: I0129 12:31:10.692109 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ndmqf\" (UniqueName: \"kubernetes.io/projected/dbcf77ee-0621-4095-83f7-31ea04a5daa0-kube-api-access-ndmqf\") pod \"swift-ring-rebalance-debug-prwvt\" (UID: \"dbcf77ee-0621-4095-83f7-31ea04a5daa0\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-prwvt" Jan 29 12:31:10 crc kubenswrapper[4753]: I0129 12:31:10.692180 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/dbcf77ee-0621-4095-83f7-31ea04a5daa0-ring-data-devices\") pod \"swift-ring-rebalance-debug-prwvt\" (UID: \"dbcf77ee-0621-4095-83f7-31ea04a5daa0\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-prwvt" Jan 29 12:31:10 crc kubenswrapper[4753]: I0129 12:31:10.692212 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/dbcf77ee-0621-4095-83f7-31ea04a5daa0-etc-swift\") pod \"swift-ring-rebalance-debug-prwvt\" (UID: \"dbcf77ee-0621-4095-83f7-31ea04a5daa0\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-prwvt" Jan 29 12:31:10 crc kubenswrapper[4753]: I0129 12:31:10.692273 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/dbcf77ee-0621-4095-83f7-31ea04a5daa0-swiftconf\") pod \"swift-ring-rebalance-debug-prwvt\" (UID: \"dbcf77ee-0621-4095-83f7-31ea04a5daa0\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-prwvt" Jan 29 12:31:10 crc kubenswrapper[4753]: I0129 12:31:10.692362 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/dbcf77ee-0621-4095-83f7-31ea04a5daa0-scripts\") pod \"swift-ring-rebalance-debug-prwvt\" (UID: \"dbcf77ee-0621-4095-83f7-31ea04a5daa0\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-prwvt" Jan 29 12:31:10 crc kubenswrapper[4753]: I0129 12:31:10.694091 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/dbcf77ee-0621-4095-83f7-31ea04a5daa0-scripts\") pod \"swift-ring-rebalance-debug-prwvt\" (UID: \"dbcf77ee-0621-4095-83f7-31ea04a5daa0\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-prwvt" Jan 29 12:31:10 crc kubenswrapper[4753]: I0129 12:31:10.694407 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/dbcf77ee-0621-4095-83f7-31ea04a5daa0-ring-data-devices\") pod 
\"swift-ring-rebalance-debug-prwvt\" (UID: \"dbcf77ee-0621-4095-83f7-31ea04a5daa0\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-prwvt" Jan 29 12:31:10 crc kubenswrapper[4753]: I0129 12:31:10.694621 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/dbcf77ee-0621-4095-83f7-31ea04a5daa0-etc-swift\") pod \"swift-ring-rebalance-debug-prwvt\" (UID: \"dbcf77ee-0621-4095-83f7-31ea04a5daa0\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-prwvt" Jan 29 12:31:10 crc kubenswrapper[4753]: I0129 12:31:10.698816 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/dbcf77ee-0621-4095-83f7-31ea04a5daa0-swiftconf\") pod \"swift-ring-rebalance-debug-prwvt\" (UID: \"dbcf77ee-0621-4095-83f7-31ea04a5daa0\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-prwvt" Jan 29 12:31:10 crc kubenswrapper[4753]: I0129 12:31:10.703049 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/dbcf77ee-0621-4095-83f7-31ea04a5daa0-dispersionconf\") pod \"swift-ring-rebalance-debug-prwvt\" (UID: \"dbcf77ee-0621-4095-83f7-31ea04a5daa0\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-prwvt" Jan 29 12:31:10 crc kubenswrapper[4753]: I0129 12:31:10.718168 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ndmqf\" (UniqueName: \"kubernetes.io/projected/dbcf77ee-0621-4095-83f7-31ea04a5daa0-kube-api-access-ndmqf\") pod \"swift-ring-rebalance-debug-prwvt\" (UID: \"dbcf77ee-0621-4095-83f7-31ea04a5daa0\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-prwvt" Jan 29 12:31:10 crc kubenswrapper[4753]: I0129 12:31:10.887919 4753 util.go:30] "No sandbox for pod can be found. 
Jan 29 12:31:11 crc kubenswrapper[4753]: I0129 12:31:11.111618 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-debug-prwvt"]
Jan 29 12:31:11 crc kubenswrapper[4753]: I0129 12:31:11.549772 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-ring-rebalance-debug-prwvt" event={"ID":"dbcf77ee-0621-4095-83f7-31ea04a5daa0","Type":"ContainerStarted","Data":"af1ebe0bf77572149389aa0735cd954615cb960c22f2d09604e56b99ece70e08"}
Jan 29 12:31:11 crc kubenswrapper[4753]: I0129 12:31:11.549831 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-ring-rebalance-debug-prwvt" event={"ID":"dbcf77ee-0621-4095-83f7-31ea04a5daa0","Type":"ContainerStarted","Data":"df4add58b30687a97ee4322da5a53a37bf0f749344e1d5e4c09a259e030bbb6f"}
Jan 29 12:31:11 crc kubenswrapper[4753]: I0129 12:31:11.571423 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="swift-kuttl-tests/swift-ring-rebalance-debug-prwvt" podStartSLOduration=1.571367736 podStartE2EDuration="1.571367736s" podCreationTimestamp="2026-01-29 12:31:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:31:11.564148251 +0000 UTC m=+1485.816229716" watchObservedRunningTime="2026-01-29 12:31:11.571367736 +0000 UTC m=+1485.823449191"
Jan 29 12:31:15 crc kubenswrapper[4753]: I0129 12:31:15.683347 4753 generic.go:334] "Generic (PLEG): container finished" podID="dbcf77ee-0621-4095-83f7-31ea04a5daa0" containerID="af1ebe0bf77572149389aa0735cd954615cb960c22f2d09604e56b99ece70e08" exitCode=0
Jan 29 12:31:15 crc kubenswrapper[4753]: I0129 12:31:15.683430 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-ring-rebalance-debug-prwvt" event={"ID":"dbcf77ee-0621-4095-83f7-31ea04a5daa0","Type":"ContainerDied","Data":"af1ebe0bf77572149389aa0735cd954615cb960c22f2d09604e56b99ece70e08"}
Jan 29 12:31:17 crc kubenswrapper[4753]: I0129 12:31:17.106262 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-debug-prwvt"
Jan 29 12:31:17 crc kubenswrapper[4753]: I0129 12:31:17.147257 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-debug-prwvt"]
Jan 29 12:31:17 crc kubenswrapper[4753]: I0129 12:31:17.156878 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-debug-prwvt"]
Jan 29 12:31:17 crc kubenswrapper[4753]: I0129 12:31:17.347916 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-debug-h2p96"]
Jan 29 12:31:17 crc kubenswrapper[4753]: E0129 12:31:17.348261 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dbcf77ee-0621-4095-83f7-31ea04a5daa0" containerName="swift-ring-rebalance"
Jan 29 12:31:17 crc kubenswrapper[4753]: I0129 12:31:17.348277 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="dbcf77ee-0621-4095-83f7-31ea04a5daa0" containerName="swift-ring-rebalance"
Jan 29 12:31:17 crc kubenswrapper[4753]: I0129 12:31:17.348442 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="dbcf77ee-0621-4095-83f7-31ea04a5daa0" containerName="swift-ring-rebalance"
Jan 29 12:31:17 crc kubenswrapper[4753]: I0129 12:31:17.348957 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-debug-h2p96"
Jan 29 12:31:17 crc kubenswrapper[4753]: I0129 12:31:17.368959 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-debug-h2p96"]
Jan 29 12:31:17 crc kubenswrapper[4753]: I0129 12:31:17.410785 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/dbcf77ee-0621-4095-83f7-31ea04a5daa0-scripts\") pod \"dbcf77ee-0621-4095-83f7-31ea04a5daa0\" (UID: \"dbcf77ee-0621-4095-83f7-31ea04a5daa0\") "
Jan 29 12:31:17 crc kubenswrapper[4753]: I0129 12:31:17.410844 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/dbcf77ee-0621-4095-83f7-31ea04a5daa0-dispersionconf\") pod \"dbcf77ee-0621-4095-83f7-31ea04a5daa0\" (UID: \"dbcf77ee-0621-4095-83f7-31ea04a5daa0\") "
Jan 29 12:31:17 crc kubenswrapper[4753]: I0129 12:31:17.410894 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/dbcf77ee-0621-4095-83f7-31ea04a5daa0-swiftconf\") pod \"dbcf77ee-0621-4095-83f7-31ea04a5daa0\" (UID: \"dbcf77ee-0621-4095-83f7-31ea04a5daa0\") "
Jan 29 12:31:17 crc kubenswrapper[4753]: I0129 12:31:17.410960 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/dbcf77ee-0621-4095-83f7-31ea04a5daa0-etc-swift\") pod \"dbcf77ee-0621-4095-83f7-31ea04a5daa0\" (UID: \"dbcf77ee-0621-4095-83f7-31ea04a5daa0\") "
Jan 29 12:31:17 crc kubenswrapper[4753]: I0129 12:31:17.411026 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/dbcf77ee-0621-4095-83f7-31ea04a5daa0-ring-data-devices\") pod \"dbcf77ee-0621-4095-83f7-31ea04a5daa0\" (UID: \"dbcf77ee-0621-4095-83f7-31ea04a5daa0\") "
Jan 29 12:31:17 crc kubenswrapper[4753]: I0129 12:31:17.411093 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ndmqf\" (UniqueName: \"kubernetes.io/projected/dbcf77ee-0621-4095-83f7-31ea04a5daa0-kube-api-access-ndmqf\") pod \"dbcf77ee-0621-4095-83f7-31ea04a5daa0\" (UID: \"dbcf77ee-0621-4095-83f7-31ea04a5daa0\") "
Jan 29 12:31:17 crc kubenswrapper[4753]: I0129 12:31:17.413162 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dbcf77ee-0621-4095-83f7-31ea04a5daa0-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "dbcf77ee-0621-4095-83f7-31ea04a5daa0" (UID: "dbcf77ee-0621-4095-83f7-31ea04a5daa0"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 12:31:17 crc kubenswrapper[4753]: I0129 12:31:17.413274 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dbcf77ee-0621-4095-83f7-31ea04a5daa0-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "dbcf77ee-0621-4095-83f7-31ea04a5daa0" (UID: "dbcf77ee-0621-4095-83f7-31ea04a5daa0"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 12:31:17 crc kubenswrapper[4753]: I0129 12:31:17.416773 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dbcf77ee-0621-4095-83f7-31ea04a5daa0-kube-api-access-ndmqf" (OuterVolumeSpecName: "kube-api-access-ndmqf") pod "dbcf77ee-0621-4095-83f7-31ea04a5daa0" (UID: "dbcf77ee-0621-4095-83f7-31ea04a5daa0"). InnerVolumeSpecName "kube-api-access-ndmqf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 12:31:17 crc kubenswrapper[4753]: I0129 12:31:17.429588 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dbcf77ee-0621-4095-83f7-31ea04a5daa0-scripts" (OuterVolumeSpecName: "scripts") pod "dbcf77ee-0621-4095-83f7-31ea04a5daa0" (UID: "dbcf77ee-0621-4095-83f7-31ea04a5daa0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 12:31:17 crc kubenswrapper[4753]: I0129 12:31:17.431776 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dbcf77ee-0621-4095-83f7-31ea04a5daa0-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "dbcf77ee-0621-4095-83f7-31ea04a5daa0" (UID: "dbcf77ee-0621-4095-83f7-31ea04a5daa0"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 12:31:17 crc kubenswrapper[4753]: I0129 12:31:17.438708 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dbcf77ee-0621-4095-83f7-31ea04a5daa0-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "dbcf77ee-0621-4095-83f7-31ea04a5daa0" (UID: "dbcf77ee-0621-4095-83f7-31ea04a5daa0"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 12:31:17 crc kubenswrapper[4753]: I0129 12:31:17.512767 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zh8ws\" (UniqueName: \"kubernetes.io/projected/5fc44961-6be6-4605-ba44-397260d294b9-kube-api-access-zh8ws\") pod \"swift-ring-rebalance-debug-h2p96\" (UID: \"5fc44961-6be6-4605-ba44-397260d294b9\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-h2p96"
Jan 29 12:31:17 crc kubenswrapper[4753]: I0129 12:31:17.512949 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/5fc44961-6be6-4605-ba44-397260d294b9-etc-swift\") pod \"swift-ring-rebalance-debug-h2p96\" (UID: \"5fc44961-6be6-4605-ba44-397260d294b9\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-h2p96"
Jan 29 12:31:17 crc kubenswrapper[4753]: I0129 12:31:17.513047 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/5fc44961-6be6-4605-ba44-397260d294b9-swiftconf\") pod \"swift-ring-rebalance-debug-h2p96\" (UID: \"5fc44961-6be6-4605-ba44-397260d294b9\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-h2p96"
Jan 29 12:31:17 crc kubenswrapper[4753]: I0129 12:31:17.513195 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5fc44961-6be6-4605-ba44-397260d294b9-scripts\") pod \"swift-ring-rebalance-debug-h2p96\" (UID: \"5fc44961-6be6-4605-ba44-397260d294b9\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-h2p96"
Jan 29 12:31:17 crc kubenswrapper[4753]: I0129 12:31:17.513278 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/5fc44961-6be6-4605-ba44-397260d294b9-ring-data-devices\") pod \"swift-ring-rebalance-debug-h2p96\" (UID: \"5fc44961-6be6-4605-ba44-397260d294b9\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-h2p96"
Jan 29 12:31:17 crc kubenswrapper[4753]: I0129 12:31:17.513316 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/5fc44961-6be6-4605-ba44-397260d294b9-dispersionconf\") pod \"swift-ring-rebalance-debug-h2p96\" (UID: \"5fc44961-6be6-4605-ba44-397260d294b9\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-h2p96"
Jan 29 12:31:17 crc kubenswrapper[4753]: I0129 12:31:17.513527 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ndmqf\" (UniqueName: \"kubernetes.io/projected/dbcf77ee-0621-4095-83f7-31ea04a5daa0-kube-api-access-ndmqf\") on node \"crc\" DevicePath \"\""
Jan 29 12:31:17 crc kubenswrapper[4753]: I0129 12:31:17.513571 4753 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/dbcf77ee-0621-4095-83f7-31ea04a5daa0-scripts\") on node \"crc\" DevicePath \"\""
Jan 29 12:31:17 crc kubenswrapper[4753]: I0129 12:31:17.513583 4753 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/dbcf77ee-0621-4095-83f7-31ea04a5daa0-dispersionconf\") on node \"crc\" DevicePath \"\""
Jan 29 12:31:17 crc kubenswrapper[4753]: I0129 12:31:17.513593 4753 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/dbcf77ee-0621-4095-83f7-31ea04a5daa0-swiftconf\") on node \"crc\" DevicePath \"\""
Jan 29 12:31:17 crc kubenswrapper[4753]: I0129 12:31:17.513601 4753 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/dbcf77ee-0621-4095-83f7-31ea04a5daa0-etc-swift\") on node \"crc\" DevicePath \"\""
Jan 29 12:31:17 crc kubenswrapper[4753]: I0129 12:31:17.513610 4753 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/dbcf77ee-0621-4095-83f7-31ea04a5daa0-ring-data-devices\") on node \"crc\" DevicePath \"\""
Jan 29 12:31:17 crc kubenswrapper[4753]: I0129 12:31:17.615080 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zh8ws\" (UniqueName: \"kubernetes.io/projected/5fc44961-6be6-4605-ba44-397260d294b9-kube-api-access-zh8ws\") pod \"swift-ring-rebalance-debug-h2p96\" (UID: \"5fc44961-6be6-4605-ba44-397260d294b9\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-h2p96"
Jan 29 12:31:17 crc kubenswrapper[4753]: I0129 12:31:17.615186 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/5fc44961-6be6-4605-ba44-397260d294b9-etc-swift\") pod \"swift-ring-rebalance-debug-h2p96\" (UID: \"5fc44961-6be6-4605-ba44-397260d294b9\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-h2p96"
Jan 29 12:31:17 crc kubenswrapper[4753]: I0129 12:31:17.615267 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/5fc44961-6be6-4605-ba44-397260d294b9-swiftconf\") pod \"swift-ring-rebalance-debug-h2p96\" (UID: \"5fc44961-6be6-4605-ba44-397260d294b9\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-h2p96"
Jan 29 12:31:17 crc kubenswrapper[4753]: I0129 12:31:17.615330 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5fc44961-6be6-4605-ba44-397260d294b9-scripts\") pod \"swift-ring-rebalance-debug-h2p96\" (UID: \"5fc44961-6be6-4605-ba44-397260d294b9\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-h2p96"
Jan 29 12:31:17 crc kubenswrapper[4753]: I0129 12:31:17.615382 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/5fc44961-6be6-4605-ba44-397260d294b9-dispersionconf\") pod \"swift-ring-rebalance-debug-h2p96\" (UID: \"5fc44961-6be6-4605-ba44-397260d294b9\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-h2p96"
Jan 29 12:31:17 crc kubenswrapper[4753]: I0129 12:31:17.615405 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/5fc44961-6be6-4605-ba44-397260d294b9-ring-data-devices\") pod \"swift-ring-rebalance-debug-h2p96\" (UID: \"5fc44961-6be6-4605-ba44-397260d294b9\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-h2p96"
Jan 29 12:31:17 crc kubenswrapper[4753]: I0129 12:31:17.616539 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/5fc44961-6be6-4605-ba44-397260d294b9-ring-data-devices\") pod \"swift-ring-rebalance-debug-h2p96\" (UID: \"5fc44961-6be6-4605-ba44-397260d294b9\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-h2p96"
Jan 29 12:31:17 crc kubenswrapper[4753]: I0129 12:31:17.617259 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5fc44961-6be6-4605-ba44-397260d294b9-scripts\") pod \"swift-ring-rebalance-debug-h2p96\" (UID: \"5fc44961-6be6-4605-ba44-397260d294b9\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-h2p96"
Jan 29 12:31:17 crc kubenswrapper[4753]: I0129 12:31:17.618304 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/5fc44961-6be6-4605-ba44-397260d294b9-etc-swift\") pod \"swift-ring-rebalance-debug-h2p96\" (UID: \"5fc44961-6be6-4605-ba44-397260d294b9\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-h2p96"
Jan 29 12:31:17 crc kubenswrapper[4753]: I0129 12:31:17.627119 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/5fc44961-6be6-4605-ba44-397260d294b9-dispersionconf\") pod \"swift-ring-rebalance-debug-h2p96\" (UID: \"5fc44961-6be6-4605-ba44-397260d294b9\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-h2p96"
Jan 29 12:31:17 crc kubenswrapper[4753]: I0129 12:31:17.627134 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/5fc44961-6be6-4605-ba44-397260d294b9-swiftconf\") pod \"swift-ring-rebalance-debug-h2p96\" (UID: \"5fc44961-6be6-4605-ba44-397260d294b9\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-h2p96"
Jan 29 12:31:17 crc kubenswrapper[4753]: I0129 12:31:17.632064 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zh8ws\" (UniqueName: \"kubernetes.io/projected/5fc44961-6be6-4605-ba44-397260d294b9-kube-api-access-zh8ws\") pod \"swift-ring-rebalance-debug-h2p96\" (UID: \"5fc44961-6be6-4605-ba44-397260d294b9\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-h2p96"
Jan 29 12:31:17 crc kubenswrapper[4753]: I0129 12:31:17.704120 4753 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="df4add58b30687a97ee4322da5a53a37bf0f749344e1d5e4c09a259e030bbb6f"
Jan 29 12:31:17 crc kubenswrapper[4753]: I0129 12:31:17.704185 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-debug-prwvt"
Jan 29 12:31:17 crc kubenswrapper[4753]: I0129 12:31:17.709412 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-debug-h2p96"
Jan 29 12:31:17 crc kubenswrapper[4753]: I0129 12:31:17.898953 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dbcf77ee-0621-4095-83f7-31ea04a5daa0" path="/var/lib/kubelet/pods/dbcf77ee-0621-4095-83f7-31ea04a5daa0/volumes"
Jan 29 12:31:18 crc kubenswrapper[4753]: I0129 12:31:18.032141 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-debug-h2p96"]
Jan 29 12:31:18 crc kubenswrapper[4753]: W0129 12:31:18.042751 4753 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5fc44961_6be6_4605_ba44_397260d294b9.slice/crio-ee9eef532d6c5987cd2ba63faab76f0f24617a2dff848bee9181dd51f9244170 WatchSource:0}: Error finding container ee9eef532d6c5987cd2ba63faab76f0f24617a2dff848bee9181dd51f9244170: Status 404 returned error can't find the container with id ee9eef532d6c5987cd2ba63faab76f0f24617a2dff848bee9181dd51f9244170
Jan 29 12:31:18 crc kubenswrapper[4753]: I0129 12:31:18.723365 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-ring-rebalance-debug-h2p96" event={"ID":"5fc44961-6be6-4605-ba44-397260d294b9","Type":"ContainerStarted","Data":"f06ef499c931eccca294f5cfa38f6da4b4d8519bacce3cc18ebb231c7e9803a7"}
Jan 29 12:31:18 crc kubenswrapper[4753]: I0129 12:31:18.723694 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-ring-rebalance-debug-h2p96" event={"ID":"5fc44961-6be6-4605-ba44-397260d294b9","Type":"ContainerStarted","Data":"ee9eef532d6c5987cd2ba63faab76f0f24617a2dff848bee9181dd51f9244170"}
Jan 29 12:31:18 crc kubenswrapper[4753]: I0129 12:31:18.757415 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="swift-kuttl-tests/swift-ring-rebalance-debug-h2p96" podStartSLOduration=1.756384437 podStartE2EDuration="1.756384437s" podCreationTimestamp="2026-01-29 12:31:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:31:18.743408289 +0000 UTC m=+1492.995489744" watchObservedRunningTime="2026-01-29 12:31:18.756384437 +0000 UTC m=+1493.008465892"
Jan 29 12:31:20 crc kubenswrapper[4753]: I0129 12:31:20.741570 4753 generic.go:334] "Generic (PLEG): container finished" podID="5fc44961-6be6-4605-ba44-397260d294b9" containerID="f06ef499c931eccca294f5cfa38f6da4b4d8519bacce3cc18ebb231c7e9803a7" exitCode=0
Jan 29 12:31:20 crc kubenswrapper[4753]: I0129 12:31:20.741949 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-ring-rebalance-debug-h2p96" event={"ID":"5fc44961-6be6-4605-ba44-397260d294b9","Type":"ContainerDied","Data":"f06ef499c931eccca294f5cfa38f6da4b4d8519bacce3cc18ebb231c7e9803a7"}
Jan 29 12:31:22 crc kubenswrapper[4753]: I0129 12:31:22.213365 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-debug-h2p96"
Jan 29 12:31:22 crc kubenswrapper[4753]: I0129 12:31:22.255275 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-debug-h2p96"]
Jan 29 12:31:22 crc kubenswrapper[4753]: I0129 12:31:22.260723 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-debug-h2p96"]
Jan 29 12:31:22 crc kubenswrapper[4753]: I0129 12:31:22.391173 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zh8ws\" (UniqueName: \"kubernetes.io/projected/5fc44961-6be6-4605-ba44-397260d294b9-kube-api-access-zh8ws\") pod \"5fc44961-6be6-4605-ba44-397260d294b9\" (UID: \"5fc44961-6be6-4605-ba44-397260d294b9\") "
Jan 29 12:31:22 crc kubenswrapper[4753]: I0129 12:31:22.391237 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/5fc44961-6be6-4605-ba44-397260d294b9-dispersionconf\") pod \"5fc44961-6be6-4605-ba44-397260d294b9\" (UID: \"5fc44961-6be6-4605-ba44-397260d294b9\") "
Jan 29 12:31:22 crc kubenswrapper[4753]: I0129 12:31:22.391302 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/5fc44961-6be6-4605-ba44-397260d294b9-etc-swift\") pod \"5fc44961-6be6-4605-ba44-397260d294b9\" (UID: \"5fc44961-6be6-4605-ba44-397260d294b9\") "
Jan 29 12:31:22 crc kubenswrapper[4753]: I0129 12:31:22.391323 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/5fc44961-6be6-4605-ba44-397260d294b9-ring-data-devices\") pod \"5fc44961-6be6-4605-ba44-397260d294b9\" (UID: \"5fc44961-6be6-4605-ba44-397260d294b9\") "
Jan 29 12:31:22 crc kubenswrapper[4753]: I0129 12:31:22.391354 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/5fc44961-6be6-4605-ba44-397260d294b9-swiftconf\") pod \"5fc44961-6be6-4605-ba44-397260d294b9\" (UID: \"5fc44961-6be6-4605-ba44-397260d294b9\") "
Jan 29 12:31:22 crc kubenswrapper[4753]: I0129 12:31:22.391402 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5fc44961-6be6-4605-ba44-397260d294b9-scripts\") pod \"5fc44961-6be6-4605-ba44-397260d294b9\" (UID: \"5fc44961-6be6-4605-ba44-397260d294b9\") "
Jan 29 12:31:22 crc kubenswrapper[4753]: I0129 12:31:22.395370 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5fc44961-6be6-4605-ba44-397260d294b9-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "5fc44961-6be6-4605-ba44-397260d294b9" (UID: "5fc44961-6be6-4605-ba44-397260d294b9"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 12:31:22 crc kubenswrapper[4753]: I0129 12:31:22.395468 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5fc44961-6be6-4605-ba44-397260d294b9-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "5fc44961-6be6-4605-ba44-397260d294b9" (UID: "5fc44961-6be6-4605-ba44-397260d294b9"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 12:31:22 crc kubenswrapper[4753]: I0129 12:31:22.400537 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fc44961-6be6-4605-ba44-397260d294b9-kube-api-access-zh8ws" (OuterVolumeSpecName: "kube-api-access-zh8ws") pod "5fc44961-6be6-4605-ba44-397260d294b9" (UID: "5fc44961-6be6-4605-ba44-397260d294b9"). InnerVolumeSpecName "kube-api-access-zh8ws". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 12:31:22 crc kubenswrapper[4753]: I0129 12:31:22.417755 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fc44961-6be6-4605-ba44-397260d294b9-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "5fc44961-6be6-4605-ba44-397260d294b9" (UID: "5fc44961-6be6-4605-ba44-397260d294b9"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 12:31:22 crc kubenswrapper[4753]: I0129 12:31:22.418188 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fc44961-6be6-4605-ba44-397260d294b9-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "5fc44961-6be6-4605-ba44-397260d294b9" (UID: "5fc44961-6be6-4605-ba44-397260d294b9"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 12:31:22 crc kubenswrapper[4753]: I0129 12:31:22.418322 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5fc44961-6be6-4605-ba44-397260d294b9-scripts" (OuterVolumeSpecName: "scripts") pod "5fc44961-6be6-4605-ba44-397260d294b9" (UID: "5fc44961-6be6-4605-ba44-397260d294b9"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 12:31:22 crc kubenswrapper[4753]: I0129 12:31:22.566890 4753 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/5fc44961-6be6-4605-ba44-397260d294b9-etc-swift\") on node \"crc\" DevicePath \"\""
Jan 29 12:31:22 crc kubenswrapper[4753]: I0129 12:31:22.566929 4753 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/5fc44961-6be6-4605-ba44-397260d294b9-ring-data-devices\") on node \"crc\" DevicePath \"\""
Jan 29 12:31:22 crc kubenswrapper[4753]: I0129 12:31:22.566942 4753 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/5fc44961-6be6-4605-ba44-397260d294b9-swiftconf\") on node \"crc\" DevicePath \"\""
Jan 29 12:31:22 crc kubenswrapper[4753]: I0129 12:31:22.566952 4753 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5fc44961-6be6-4605-ba44-397260d294b9-scripts\") on node \"crc\" DevicePath \"\""
Jan 29 12:31:22 crc kubenswrapper[4753]: I0129 12:31:22.566961 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zh8ws\" (UniqueName: \"kubernetes.io/projected/5fc44961-6be6-4605-ba44-397260d294b9-kube-api-access-zh8ws\") on node \"crc\" DevicePath \"\""
Jan 29 12:31:22 crc kubenswrapper[4753]: I0129 12:31:22.566969 4753 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/5fc44961-6be6-4605-ba44-397260d294b9-dispersionconf\") on node \"crc\" DevicePath \"\""
Jan 29 12:31:22 crc kubenswrapper[4753]: I0129 12:31:22.762895 4753 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ee9eef532d6c5987cd2ba63faab76f0f24617a2dff848bee9181dd51f9244170"
Jan 29 12:31:22 crc kubenswrapper[4753]: I0129 12:31:22.762930 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-debug-h2p96"
Jan 29 12:31:22 crc kubenswrapper[4753]: I0129 12:31:22.820493 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-debug-m8z8m"]
Jan 29 12:31:22 crc kubenswrapper[4753]: E0129 12:31:22.820912 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5fc44961-6be6-4605-ba44-397260d294b9" containerName="swift-ring-rebalance"
Jan 29 12:31:22 crc kubenswrapper[4753]: I0129 12:31:22.820934 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="5fc44961-6be6-4605-ba44-397260d294b9" containerName="swift-ring-rebalance"
Jan 29 12:31:22 crc kubenswrapper[4753]: I0129 12:31:22.821143 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="5fc44961-6be6-4605-ba44-397260d294b9" containerName="swift-ring-rebalance"
Jan 29 12:31:22 crc kubenswrapper[4753]: I0129 12:31:22.821890 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-debug-m8z8m"
Jan 29 12:31:22 crc kubenswrapper[4753]: I0129 12:31:22.823932 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"swift-kuttl-tests"/"swift-ring-config-data"
Jan 29 12:31:22 crc kubenswrapper[4753]: I0129 12:31:22.825217 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"swift-kuttl-tests"/"swift-ring-scripts"
Jan 29 12:31:22 crc kubenswrapper[4753]: I0129 12:31:22.831976 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-debug-m8z8m"]
Jan 29 12:31:22 crc kubenswrapper[4753]: I0129 12:31:22.972867 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/e78cde12-c427-4dce-86a0-2ba673222bd1-ring-data-devices\") pod \"swift-ring-rebalance-debug-m8z8m\" (UID: \"e78cde12-c427-4dce-86a0-2ba673222bd1\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-m8z8m"
Jan 29 12:31:22 crc kubenswrapper[4753]: I0129 12:31:22.972944 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cv9sq\" (UniqueName: \"kubernetes.io/projected/e78cde12-c427-4dce-86a0-2ba673222bd1-kube-api-access-cv9sq\") pod \"swift-ring-rebalance-debug-m8z8m\" (UID: \"e78cde12-c427-4dce-86a0-2ba673222bd1\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-m8z8m"
Jan 29 12:31:22 crc kubenswrapper[4753]: I0129 12:31:22.972993 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e78cde12-c427-4dce-86a0-2ba673222bd1-scripts\") pod \"swift-ring-rebalance-debug-m8z8m\" (UID: \"e78cde12-c427-4dce-86a0-2ba673222bd1\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-m8z8m"
Jan 29 12:31:22 crc kubenswrapper[4753]: I0129 12:31:22.973034 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/e78cde12-c427-4dce-86a0-2ba673222bd1-dispersionconf\") pod \"swift-ring-rebalance-debug-m8z8m\" (UID: \"e78cde12-c427-4dce-86a0-2ba673222bd1\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-m8z8m"
Jan 29 12:31:22 crc kubenswrapper[4753]: I0129 12:31:22.973087 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/e78cde12-c427-4dce-86a0-2ba673222bd1-etc-swift\") pod \"swift-ring-rebalance-debug-m8z8m\" (UID: \"e78cde12-c427-4dce-86a0-2ba673222bd1\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-m8z8m"
Jan 29 12:31:22 crc kubenswrapper[4753]: I0129 12:31:22.973115 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/e78cde12-c427-4dce-86a0-2ba673222bd1-swiftconf\") pod \"swift-ring-rebalance-debug-m8z8m\" (UID: \"e78cde12-c427-4dce-86a0-2ba673222bd1\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-m8z8m"
Jan 29 12:31:23 crc kubenswrapper[4753]: I0129 12:31:23.092427 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/e78cde12-c427-4dce-86a0-2ba673222bd1-dispersionconf\") pod \"swift-ring-rebalance-debug-m8z8m\" (UID: \"e78cde12-c427-4dce-86a0-2ba673222bd1\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-m8z8m"
Jan 29 12:31:23 crc kubenswrapper[4753]: I0129 12:31:23.092880 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/e78cde12-c427-4dce-86a0-2ba673222bd1-etc-swift\") pod \"swift-ring-rebalance-debug-m8z8m\" (UID: \"e78cde12-c427-4dce-86a0-2ba673222bd1\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-m8z8m"
Jan 29 12:31:23 crc kubenswrapper[4753]: I0129 12:31:23.093019 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/e78cde12-c427-4dce-86a0-2ba673222bd1-swiftconf\") pod \"swift-ring-rebalance-debug-m8z8m\" (UID: \"e78cde12-c427-4dce-86a0-2ba673222bd1\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-m8z8m"
Jan 29 12:31:23 crc kubenswrapper[4753]: I0129 12:31:23.093202 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/e78cde12-c427-4dce-86a0-2ba673222bd1-ring-data-devices\") pod \"swift-ring-rebalance-debug-m8z8m\" (UID: \"e78cde12-c427-4dce-86a0-2ba673222bd1\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-m8z8m"
Jan 29 12:31:23 crc kubenswrapper[4753]: I0129 12:31:23.093371 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cv9sq\" (UniqueName: \"kubernetes.io/projected/e78cde12-c427-4dce-86a0-2ba673222bd1-kube-api-access-cv9sq\") pod \"swift-ring-rebalance-debug-m8z8m\" (UID: \"e78cde12-c427-4dce-86a0-2ba673222bd1\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-m8z8m"
Jan 29 12:31:23 crc kubenswrapper[4753]: I0129 12:31:23.093488 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e78cde12-c427-4dce-86a0-2ba673222bd1-scripts\") pod \"swift-ring-rebalance-debug-m8z8m\" (UID: \"e78cde12-c427-4dce-86a0-2ba673222bd1\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-m8z8m"
Jan 29 12:31:23 crc kubenswrapper[4753]: I0129 12:31:23.093385 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/e78cde12-c427-4dce-86a0-2ba673222bd1-etc-swift\") pod \"swift-ring-rebalance-debug-m8z8m\" (UID: \"e78cde12-c427-4dce-86a0-2ba673222bd1\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-m8z8m"
Jan 29 12:31:23 crc kubenswrapper[4753]: I0129 12:31:23.094141 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/e78cde12-c427-4dce-86a0-2ba673222bd1-ring-data-devices\") pod \"swift-ring-rebalance-debug-m8z8m\" (UID: \"e78cde12-c427-4dce-86a0-2ba673222bd1\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-m8z8m"
Jan 29 12:31:23 crc kubenswrapper[4753]: I0129 12:31:23.095093 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e78cde12-c427-4dce-86a0-2ba673222bd1-scripts\") pod \"swift-ring-rebalance-debug-m8z8m\" (UID: \"e78cde12-c427-4dce-86a0-2ba673222bd1\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-m8z8m"
Jan 29 12:31:23 crc kubenswrapper[4753]: I0129 12:31:23.095692 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/e78cde12-c427-4dce-86a0-2ba673222bd1-swiftconf\") pod \"swift-ring-rebalance-debug-m8z8m\" (UID: \"e78cde12-c427-4dce-86a0-2ba673222bd1\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-m8z8m"
Jan 29 12:31:23 crc kubenswrapper[4753]: I0129 12:31:23.096009 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/e78cde12-c427-4dce-86a0-2ba673222bd1-dispersionconf\") pod \"swift-ring-rebalance-debug-m8z8m\" (UID: \"e78cde12-c427-4dce-86a0-2ba673222bd1\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-m8z8m"
Jan 29 12:31:23 crc kubenswrapper[4753]: I0129 12:31:23.113049 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cv9sq\" (UniqueName: \"kubernetes.io/projected/e78cde12-c427-4dce-86a0-2ba673222bd1-kube-api-access-cv9sq\") pod \"swift-ring-rebalance-debug-m8z8m\" (UID: \"e78cde12-c427-4dce-86a0-2ba673222bd1\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-m8z8m"
Jan 29 12:31:23 crc kubenswrapper[4753]: I0129 12:31:23.155766 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-debug-m8z8m"
Jan 29 12:31:23 crc kubenswrapper[4753]: W0129 12:31:23.402212 4753 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode78cde12_c427_4dce_86a0_2ba673222bd1.slice/crio-3e2b1384eeabf7476c2b26790dffff6e9e53bd02f88a2925ff96c484557fad8e WatchSource:0}: Error finding container 3e2b1384eeabf7476c2b26790dffff6e9e53bd02f88a2925ff96c484557fad8e: Status 404 returned error can't find the container with id 3e2b1384eeabf7476c2b26790dffff6e9e53bd02f88a2925ff96c484557fad8e
Jan 29 12:31:23 crc kubenswrapper[4753]: I0129 12:31:23.402351 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-debug-m8z8m"]
Jan 29 12:31:23 crc kubenswrapper[4753]: I0129 12:31:23.774890 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-ring-rebalance-debug-m8z8m" event={"ID":"e78cde12-c427-4dce-86a0-2ba673222bd1","Type":"ContainerStarted","Data":"3e2b1384eeabf7476c2b26790dffff6e9e53bd02f88a2925ff96c484557fad8e"}
Jan 29 12:31:23 crc kubenswrapper[4753]: I0129 12:31:23.900349 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fc44961-6be6-4605-ba44-397260d294b9" path="/var/lib/kubelet/pods/5fc44961-6be6-4605-ba44-397260d294b9/volumes"
Jan 29 12:31:24 crc kubenswrapper[4753]: I0129 12:31:24.784665 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-ring-rebalance-debug-m8z8m" event={"ID":"e78cde12-c427-4dce-86a0-2ba673222bd1","Type":"ContainerStarted","Data":"b626d4cf4f9f12b19febd2612ba28c38240c1ae05ccad16175116f001f711318"}
Jan 29 12:31:24 crc kubenswrapper[4753]: I0129 12:31:24.807049 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="swift-kuttl-tests/swift-ring-rebalance-debug-m8z8m" podStartSLOduration=2.807024815 podStartE2EDuration="2.807024815s" podCreationTimestamp="2026-01-29 12:31:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:31:24.801303632 +0000 UTC m=+1499.053385087" watchObservedRunningTime="2026-01-29 12:31:24.807024815 +0000 UTC m=+1499.059106270"
Jan 29 12:31:26 crc kubenswrapper[4753]: I0129 12:31:26.810078 4753 generic.go:334] "Generic (PLEG): container finished" podID="e78cde12-c427-4dce-86a0-2ba673222bd1" containerID="b626d4cf4f9f12b19febd2612ba28c38240c1ae05ccad16175116f001f711318" exitCode=0
Jan 29 12:31:26 crc kubenswrapper[4753]: I0129 12:31:26.810170 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-ring-rebalance-debug-m8z8m" event={"ID":"e78cde12-c427-4dce-86a0-2ba673222bd1","Type":"ContainerDied","Data":"b626d4cf4f9f12b19febd2612ba28c38240c1ae05ccad16175116f001f711318"}
Jan 29 12:31:28 crc kubenswrapper[4753]: I0129 12:31:28.207504 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-debug-m8z8m"
Jan 29 12:31:28 crc kubenswrapper[4753]: I0129 12:31:28.249110 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-debug-m8z8m"]
Jan 29 12:31:28 crc kubenswrapper[4753]: I0129 12:31:28.258781 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-debug-m8z8m"]
Jan 29 12:31:28 crc kubenswrapper[4753]: I0129 12:31:28.371975 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/e78cde12-c427-4dce-86a0-2ba673222bd1-dispersionconf\") pod \"e78cde12-c427-4dce-86a0-2ba673222bd1\" (UID: \"e78cde12-c427-4dce-86a0-2ba673222bd1\") "
Jan 29 12:31:28 crc kubenswrapper[4753]: I0129 12:31:28.372065 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/e78cde12-c427-4dce-86a0-2ba673222bd1-etc-swift\") pod \"e78cde12-c427-4dce-86a0-2ba673222bd1\" (UID: \"e78cde12-c427-4dce-86a0-2ba673222bd1\") "
Jan 29 12:31:28 crc kubenswrapper[4753]: I0129 12:31:28.372116 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/e78cde12-c427-4dce-86a0-2ba673222bd1-swiftconf\") pod \"e78cde12-c427-4dce-86a0-2ba673222bd1\" (UID: \"e78cde12-c427-4dce-86a0-2ba673222bd1\") "
Jan 29 12:31:28 crc kubenswrapper[4753]: I0129 12:31:28.372180 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cv9sq\" (UniqueName: \"kubernetes.io/projected/e78cde12-c427-4dce-86a0-2ba673222bd1-kube-api-access-cv9sq\") pod \"e78cde12-c427-4dce-86a0-2ba673222bd1\" (UID: \"e78cde12-c427-4dce-86a0-2ba673222bd1\") "
Jan 29 12:31:28 crc kubenswrapper[4753]: I0129 12:31:28.372318 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e78cde12-c427-4dce-86a0-2ba673222bd1-scripts\") pod \"e78cde12-c427-4dce-86a0-2ba673222bd1\" (UID: \"e78cde12-c427-4dce-86a0-2ba673222bd1\") "
Jan 29 12:31:28 crc kubenswrapper[4753]: I0129 12:31:28.372350 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/e78cde12-c427-4dce-86a0-2ba673222bd1-ring-data-devices\") pod \"e78cde12-c427-4dce-86a0-2ba673222bd1\" (UID: \"e78cde12-c427-4dce-86a0-2ba673222bd1\") "
Jan 29 12:31:28 crc kubenswrapper[4753]: I0129 12:31:28.373255 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e78cde12-c427-4dce-86a0-2ba673222bd1-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "e78cde12-c427-4dce-86a0-2ba673222bd1" (UID: "e78cde12-c427-4dce-86a0-2ba673222bd1"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 12:31:28 crc kubenswrapper[4753]: I0129 12:31:28.373337 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e78cde12-c427-4dce-86a0-2ba673222bd1-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "e78cde12-c427-4dce-86a0-2ba673222bd1" (UID: "e78cde12-c427-4dce-86a0-2ba673222bd1"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 12:31:28 crc kubenswrapper[4753]: I0129 12:31:28.378239 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e78cde12-c427-4dce-86a0-2ba673222bd1-kube-api-access-cv9sq" (OuterVolumeSpecName: "kube-api-access-cv9sq") pod "e78cde12-c427-4dce-86a0-2ba673222bd1" (UID: "e78cde12-c427-4dce-86a0-2ba673222bd1"). InnerVolumeSpecName "kube-api-access-cv9sq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 12:31:28 crc kubenswrapper[4753]: I0129 12:31:28.396170 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e78cde12-c427-4dce-86a0-2ba673222bd1-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "e78cde12-c427-4dce-86a0-2ba673222bd1" (UID: "e78cde12-c427-4dce-86a0-2ba673222bd1"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 12:31:28 crc kubenswrapper[4753]: I0129 12:31:28.396768 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e78cde12-c427-4dce-86a0-2ba673222bd1-scripts" (OuterVolumeSpecName: "scripts") pod "e78cde12-c427-4dce-86a0-2ba673222bd1" (UID: "e78cde12-c427-4dce-86a0-2ba673222bd1"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 12:31:28 crc kubenswrapper[4753]: I0129 12:31:28.401872 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e78cde12-c427-4dce-86a0-2ba673222bd1-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "e78cde12-c427-4dce-86a0-2ba673222bd1" (UID: "e78cde12-c427-4dce-86a0-2ba673222bd1"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 12:31:28 crc kubenswrapper[4753]: I0129 12:31:28.474993 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cv9sq\" (UniqueName: \"kubernetes.io/projected/e78cde12-c427-4dce-86a0-2ba673222bd1-kube-api-access-cv9sq\") on node \"crc\" DevicePath \"\""
Jan 29 12:31:28 crc kubenswrapper[4753]: I0129 12:31:28.475056 4753 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e78cde12-c427-4dce-86a0-2ba673222bd1-scripts\") on node \"crc\" DevicePath \"\""
Jan 29 12:31:28 crc kubenswrapper[4753]: I0129 12:31:28.475074 4753 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/e78cde12-c427-4dce-86a0-2ba673222bd1-ring-data-devices\") on node \"crc\" DevicePath \"\""
Jan 29 12:31:28 crc kubenswrapper[4753]: I0129 12:31:28.475089 4753 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/e78cde12-c427-4dce-86a0-2ba673222bd1-dispersionconf\") on node \"crc\" DevicePath \"\""
Jan 29 12:31:28 crc kubenswrapper[4753]: I0129 12:31:28.475105 4753 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/e78cde12-c427-4dce-86a0-2ba673222bd1-etc-swift\") on node \"crc\" DevicePath \"\""
Jan 29 12:31:28 crc kubenswrapper[4753]: I0129 12:31:28.475118 4753 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/e78cde12-c427-4dce-86a0-2ba673222bd1-swiftconf\") on node \"crc\" DevicePath \"\""
Jan 29 12:31:28 crc kubenswrapper[4753]: I0129 12:31:28.830186 4753 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3e2b1384eeabf7476c2b26790dffff6e9e53bd02f88a2925ff96c484557fad8e"
Jan 29 12:31:28 crc kubenswrapper[4753]: I0129 12:31:28.830296 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-debug-m8z8m"
Jan 29 12:31:29 crc kubenswrapper[4753]: I0129 12:31:29.407700 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-debug-n8nz4"]
Jan 29 12:31:29 crc kubenswrapper[4753]: E0129 12:31:29.408547 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e78cde12-c427-4dce-86a0-2ba673222bd1" containerName="swift-ring-rebalance"
Jan 29 12:31:29 crc kubenswrapper[4753]: I0129 12:31:29.408565 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="e78cde12-c427-4dce-86a0-2ba673222bd1" containerName="swift-ring-rebalance"
Jan 29 12:31:29 crc kubenswrapper[4753]: I0129 12:31:29.408815 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="e78cde12-c427-4dce-86a0-2ba673222bd1" containerName="swift-ring-rebalance"
Jan 29 12:31:29 crc kubenswrapper[4753]: I0129 12:31:29.409555 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-debug-n8nz4"
Jan 29 12:31:29 crc kubenswrapper[4753]: I0129 12:31:29.415118 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"swift-kuttl-tests"/"swift-ring-scripts"
Jan 29 12:31:29 crc kubenswrapper[4753]: I0129 12:31:29.415310 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"swift-kuttl-tests"/"swift-ring-config-data"
Jan 29 12:31:29 crc kubenswrapper[4753]: I0129 12:31:29.417996 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-debug-n8nz4"]
Jan 29 12:31:29 crc kubenswrapper[4753]: I0129 12:31:29.482678 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8bc741ab-9534-404d-a356-e4cfc92ddcd5-scripts\") pod \"swift-ring-rebalance-debug-n8nz4\" (UID: \"8bc741ab-9534-404d-a356-e4cfc92ddcd5\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-n8nz4"
Jan 29 12:31:29 crc kubenswrapper[4753]: I0129 12:31:29.482804 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/8bc741ab-9534-404d-a356-e4cfc92ddcd5-swiftconf\") pod \"swift-ring-rebalance-debug-n8nz4\" (UID: \"8bc741ab-9534-404d-a356-e4cfc92ddcd5\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-n8nz4"
Jan 29 12:31:29 crc kubenswrapper[4753]: I0129 12:31:29.482838 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5x7q8\" (UniqueName: \"kubernetes.io/projected/8bc741ab-9534-404d-a356-e4cfc92ddcd5-kube-api-access-5x7q8\") pod \"swift-ring-rebalance-debug-n8nz4\" (UID: \"8bc741ab-9534-404d-a356-e4cfc92ddcd5\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-n8nz4"
Jan 29 12:31:29 crc kubenswrapper[4753]: I0129 12:31:29.482896 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/8bc741ab-9534-404d-a356-e4cfc92ddcd5-etc-swift\") pod \"swift-ring-rebalance-debug-n8nz4\" (UID: \"8bc741ab-9534-404d-a356-e4cfc92ddcd5\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-n8nz4"
Jan 29 12:31:29 crc kubenswrapper[4753]: I0129 12:31:29.482962 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/8bc741ab-9534-404d-a356-e4cfc92ddcd5-dispersionconf\") pod \"swift-ring-rebalance-debug-n8nz4\" (UID: \"8bc741ab-9534-404d-a356-e4cfc92ddcd5\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-n8nz4"
Jan 29 12:31:29 crc kubenswrapper[4753]: I0129 12:31:29.482996 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/8bc741ab-9534-404d-a356-e4cfc92ddcd5-ring-data-devices\") pod \"swift-ring-rebalance-debug-n8nz4\" (UID: \"8bc741ab-9534-404d-a356-e4cfc92ddcd5\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-n8nz4"
Jan 29 12:31:29 crc kubenswrapper[4753]: I0129 12:31:29.585110 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8bc741ab-9534-404d-a356-e4cfc92ddcd5-scripts\") pod \"swift-ring-rebalance-debug-n8nz4\" (UID: \"8bc741ab-9534-404d-a356-e4cfc92ddcd5\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-n8nz4"
Jan 29 12:31:29 crc kubenswrapper[4753]: I0129 12:31:29.585177 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/8bc741ab-9534-404d-a356-e4cfc92ddcd5-swiftconf\") pod \"swift-ring-rebalance-debug-n8nz4\" (UID: \"8bc741ab-9534-404d-a356-e4cfc92ddcd5\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-n8nz4"
Jan 29 12:31:29 crc kubenswrapper[4753]: I0129 12:31:29.585215 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5x7q8\" (UniqueName: \"kubernetes.io/projected/8bc741ab-9534-404d-a356-e4cfc92ddcd5-kube-api-access-5x7q8\") pod \"swift-ring-rebalance-debug-n8nz4\" (UID: \"8bc741ab-9534-404d-a356-e4cfc92ddcd5\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-n8nz4"
Jan 29 12:31:29 crc kubenswrapper[4753]: I0129 12:31:29.585314 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/8bc741ab-9534-404d-a356-e4cfc92ddcd5-etc-swift\") pod \"swift-ring-rebalance-debug-n8nz4\" (UID: \"8bc741ab-9534-404d-a356-e4cfc92ddcd5\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-n8nz4"
Jan 29 12:31:29 crc kubenswrapper[4753]: I0129 12:31:29.585354 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/8bc741ab-9534-404d-a356-e4cfc92ddcd5-dispersionconf\") pod \"swift-ring-rebalance-debug-n8nz4\" (UID: \"8bc741ab-9534-404d-a356-e4cfc92ddcd5\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-n8nz4"
Jan 29 12:31:29 crc kubenswrapper[4753]: I0129 12:31:29.585431 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/8bc741ab-9534-404d-a356-e4cfc92ddcd5-ring-data-devices\") pod \"swift-ring-rebalance-debug-n8nz4\" (UID: \"8bc741ab-9534-404d-a356-e4cfc92ddcd5\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-n8nz4"
Jan 29 12:31:29 crc kubenswrapper[4753]: I0129 12:31:29.586142 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/8bc741ab-9534-404d-a356-e4cfc92ddcd5-etc-swift\") pod \"swift-ring-rebalance-debug-n8nz4\" (UID: \"8bc741ab-9534-404d-a356-e4cfc92ddcd5\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-n8nz4"
Jan 29 12:31:29 crc kubenswrapper[4753]: I0129 12:31:29.586711 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8bc741ab-9534-404d-a356-e4cfc92ddcd5-scripts\") pod \"swift-ring-rebalance-debug-n8nz4\" (UID: \"8bc741ab-9534-404d-a356-e4cfc92ddcd5\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-n8nz4"
Jan 29 12:31:29 crc kubenswrapper[4753]: I0129 12:31:29.586803 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/8bc741ab-9534-404d-a356-e4cfc92ddcd5-ring-data-devices\") pod \"swift-ring-rebalance-debug-n8nz4\" (UID: \"8bc741ab-9534-404d-a356-e4cfc92ddcd5\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-n8nz4"
Jan 29 12:31:29 crc kubenswrapper[4753]: I0129 12:31:29.591759 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/8bc741ab-9534-404d-a356-e4cfc92ddcd5-dispersionconf\") pod \"swift-ring-rebalance-debug-n8nz4\" (UID: \"8bc741ab-9534-404d-a356-e4cfc92ddcd5\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-n8nz4"
Jan 29 12:31:29 crc kubenswrapper[4753]: I0129 12:31:29.592078 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/8bc741ab-9534-404d-a356-e4cfc92ddcd5-swiftconf\") pod \"swift-ring-rebalance-debug-n8nz4\" (UID: \"8bc741ab-9534-404d-a356-e4cfc92ddcd5\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-n8nz4"
Jan 29 12:31:29 crc kubenswrapper[4753]: I0129 12:31:29.617788 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5x7q8\" (UniqueName: \"kubernetes.io/projected/8bc741ab-9534-404d-a356-e4cfc92ddcd5-kube-api-access-5x7q8\") pod \"swift-ring-rebalance-debug-n8nz4\" (UID: \"8bc741ab-9534-404d-a356-e4cfc92ddcd5\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-n8nz4"
Jan 29 12:31:29 crc kubenswrapper[4753]: I0129 12:31:29.939393 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-debug-n8nz4"
Jan 29 12:31:29 crc kubenswrapper[4753]: I0129 12:31:29.955817 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e78cde12-c427-4dce-86a0-2ba673222bd1" path="/var/lib/kubelet/pods/e78cde12-c427-4dce-86a0-2ba673222bd1/volumes"
Jan 29 12:31:30 crc kubenswrapper[4753]: I0129 12:31:30.500957 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-debug-n8nz4"]
Jan 29 12:31:31 crc kubenswrapper[4753]: I0129 12:31:31.006599 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-ring-rebalance-debug-n8nz4" event={"ID":"8bc741ab-9534-404d-a356-e4cfc92ddcd5","Type":"ContainerStarted","Data":"d960e7df489abcbee5c2ce5c0b0cfbaf0f0c2dac4e26fbf6f15ad27ca9f3ea32"}
Jan 29 12:31:31 crc kubenswrapper[4753]: I0129 12:31:31.006853 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-ring-rebalance-debug-n8nz4" event={"ID":"8bc741ab-9534-404d-a356-e4cfc92ddcd5","Type":"ContainerStarted","Data":"398096b5040a1c9e3207b475745e9541c04d95a894c30154083ada8805be9ed5"}
Jan 29 12:31:31 crc kubenswrapper[4753]: I0129 12:31:31.029633 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="swift-kuttl-tests/swift-ring-rebalance-debug-n8nz4" podStartSLOduration=2.029603337 podStartE2EDuration="2.029603337s" podCreationTimestamp="2026-01-29 12:31:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:31:31.027863727 +0000 UTC m=+1505.279945182" watchObservedRunningTime="2026-01-29 12:31:31.029603337 +0000 UTC m=+1505.281684792"
Jan 29 12:31:33 crc kubenswrapper[4753]: I0129 12:31:33.029114 4753 generic.go:334] "Generic (PLEG): container finished" podID="8bc741ab-9534-404d-a356-e4cfc92ddcd5" containerID="d960e7df489abcbee5c2ce5c0b0cfbaf0f0c2dac4e26fbf6f15ad27ca9f3ea32" exitCode=0
Jan 29 12:31:33 crc kubenswrapper[4753]: I0129 12:31:33.029512 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-ring-rebalance-debug-n8nz4" event={"ID":"8bc741ab-9534-404d-a356-e4cfc92ddcd5","Type":"ContainerDied","Data":"d960e7df489abcbee5c2ce5c0b0cfbaf0f0c2dac4e26fbf6f15ad27ca9f3ea32"}
Jan 29 12:31:34 crc kubenswrapper[4753]: I0129 12:31:34.380381 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-debug-n8nz4"
Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-debug-n8nz4" Jan 29 12:31:34 crc kubenswrapper[4753]: I0129 12:31:34.415763 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-debug-n8nz4"] Jan 29 12:31:34 crc kubenswrapper[4753]: I0129 12:31:34.422207 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-debug-n8nz4"] Jan 29 12:31:34 crc kubenswrapper[4753]: I0129 12:31:34.540741 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5x7q8\" (UniqueName: \"kubernetes.io/projected/8bc741ab-9534-404d-a356-e4cfc92ddcd5-kube-api-access-5x7q8\") pod \"8bc741ab-9534-404d-a356-e4cfc92ddcd5\" (UID: \"8bc741ab-9534-404d-a356-e4cfc92ddcd5\") " Jan 29 12:31:34 crc kubenswrapper[4753]: I0129 12:31:34.540842 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/8bc741ab-9534-404d-a356-e4cfc92ddcd5-ring-data-devices\") pod \"8bc741ab-9534-404d-a356-e4cfc92ddcd5\" (UID: \"8bc741ab-9534-404d-a356-e4cfc92ddcd5\") " Jan 29 12:31:34 crc kubenswrapper[4753]: I0129 12:31:34.540938 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/8bc741ab-9534-404d-a356-e4cfc92ddcd5-dispersionconf\") pod \"8bc741ab-9534-404d-a356-e4cfc92ddcd5\" (UID: \"8bc741ab-9534-404d-a356-e4cfc92ddcd5\") " Jan 29 12:31:34 crc kubenswrapper[4753]: I0129 12:31:34.541053 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/8bc741ab-9534-404d-a356-e4cfc92ddcd5-swiftconf\") pod \"8bc741ab-9534-404d-a356-e4cfc92ddcd5\" (UID: \"8bc741ab-9534-404d-a356-e4cfc92ddcd5\") " Jan 29 12:31:34 crc kubenswrapper[4753]: I0129 12:31:34.541448 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8bc741ab-9534-404d-a356-e4cfc92ddcd5-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "8bc741ab-9534-404d-a356-e4cfc92ddcd5" (UID: "8bc741ab-9534-404d-a356-e4cfc92ddcd5"). InnerVolumeSpecName "ring-data-devices". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:31:34 crc kubenswrapper[4753]: I0129 12:31:34.541913 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8bc741ab-9534-404d-a356-e4cfc92ddcd5-scripts\") pod \"8bc741ab-9534-404d-a356-e4cfc92ddcd5\" (UID: \"8bc741ab-9534-404d-a356-e4cfc92ddcd5\") " Jan 29 12:31:34 crc kubenswrapper[4753]: I0129 12:31:34.542061 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/8bc741ab-9534-404d-a356-e4cfc92ddcd5-etc-swift\") pod \"8bc741ab-9534-404d-a356-e4cfc92ddcd5\" (UID: \"8bc741ab-9534-404d-a356-e4cfc92ddcd5\") " Jan 29 12:31:34 crc kubenswrapper[4753]: I0129 12:31:34.542548 4753 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/8bc741ab-9534-404d-a356-e4cfc92ddcd5-ring-data-devices\") on node \"crc\" DevicePath \"\"" Jan 29 12:31:34 crc kubenswrapper[4753]: I0129 12:31:34.542859 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8bc741ab-9534-404d-a356-e4cfc92ddcd5-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "8bc741ab-9534-404d-a356-e4cfc92ddcd5" (UID: "8bc741ab-9534-404d-a356-e4cfc92ddcd5"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:31:34 crc kubenswrapper[4753]: I0129 12:31:34.547730 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8bc741ab-9534-404d-a356-e4cfc92ddcd5-kube-api-access-5x7q8" (OuterVolumeSpecName: "kube-api-access-5x7q8") pod "8bc741ab-9534-404d-a356-e4cfc92ddcd5" (UID: "8bc741ab-9534-404d-a356-e4cfc92ddcd5"). InnerVolumeSpecName "kube-api-access-5x7q8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:31:34 crc kubenswrapper[4753]: I0129 12:31:34.564155 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8bc741ab-9534-404d-a356-e4cfc92ddcd5-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "8bc741ab-9534-404d-a356-e4cfc92ddcd5" (UID: "8bc741ab-9534-404d-a356-e4cfc92ddcd5"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:31:34 crc kubenswrapper[4753]: I0129 12:31:34.569542 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8bc741ab-9534-404d-a356-e4cfc92ddcd5-scripts" (OuterVolumeSpecName: "scripts") pod "8bc741ab-9534-404d-a356-e4cfc92ddcd5" (UID: "8bc741ab-9534-404d-a356-e4cfc92ddcd5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:31:34 crc kubenswrapper[4753]: I0129 12:31:34.570755 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8bc741ab-9534-404d-a356-e4cfc92ddcd5-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "8bc741ab-9534-404d-a356-e4cfc92ddcd5" (UID: "8bc741ab-9534-404d-a356-e4cfc92ddcd5"). InnerVolumeSpecName "swiftconf". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:31:34 crc kubenswrapper[4753]: I0129 12:31:34.644134 4753 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8bc741ab-9534-404d-a356-e4cfc92ddcd5-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:31:34 crc kubenswrapper[4753]: I0129 12:31:34.644173 4753 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/8bc741ab-9534-404d-a356-e4cfc92ddcd5-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 29 12:31:34 crc kubenswrapper[4753]: I0129 12:31:34.644191 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5x7q8\" (UniqueName: \"kubernetes.io/projected/8bc741ab-9534-404d-a356-e4cfc92ddcd5-kube-api-access-5x7q8\") on node \"crc\" DevicePath \"\"" Jan 29 12:31:34 crc kubenswrapper[4753]: I0129 12:31:34.644206 4753 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/8bc741ab-9534-404d-a356-e4cfc92ddcd5-dispersionconf\") on node \"crc\" DevicePath \"\"" Jan 29 12:31:34 crc kubenswrapper[4753]: I0129 12:31:34.644219 4753 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/8bc741ab-9534-404d-a356-e4cfc92ddcd5-swiftconf\") on node \"crc\" DevicePath \"\"" Jan 29 12:31:35 crc kubenswrapper[4753]: I0129 12:31:35.053080 4753 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="398096b5040a1c9e3207b475745e9541c04d95a894c30154083ada8805be9ed5" Jan 29 12:31:35 crc kubenswrapper[4753]: I0129 12:31:35.053137 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-debug-n8nz4" Jan 29 12:31:35 crc kubenswrapper[4753]: I0129 12:31:35.753806 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-debug-c5khd"] Jan 29 12:31:35 crc kubenswrapper[4753]: E0129 12:31:35.754601 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8bc741ab-9534-404d-a356-e4cfc92ddcd5" containerName="swift-ring-rebalance" Jan 29 12:31:35 crc kubenswrapper[4753]: I0129 12:31:35.754622 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="8bc741ab-9534-404d-a356-e4cfc92ddcd5" containerName="swift-ring-rebalance" Jan 29 12:31:35 crc kubenswrapper[4753]: I0129 12:31:35.754861 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="8bc741ab-9534-404d-a356-e4cfc92ddcd5" containerName="swift-ring-rebalance" Jan 29 12:31:35 crc kubenswrapper[4753]: I0129 12:31:35.755627 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-debug-c5khd" Jan 29 12:31:35 crc kubenswrapper[4753]: I0129 12:31:35.759013 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"swift-kuttl-tests"/"swift-ring-config-data" Jan 29 12:31:35 crc kubenswrapper[4753]: I0129 12:31:35.759295 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"swift-kuttl-tests"/"swift-ring-scripts" Jan 29 12:31:35 crc kubenswrapper[4753]: I0129 12:31:35.772013 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-debug-c5khd"] Jan 29 12:31:35 crc kubenswrapper[4753]: I0129 12:31:35.853423 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/e4000d05-91fb-44b6-a38f-afa7b616dd8b-etc-swift\") pod \"swift-ring-rebalance-debug-c5khd\" (UID: \"e4000d05-91fb-44b6-a38f-afa7b616dd8b\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-c5khd" Jan 29 12:31:35 crc kubenswrapper[4753]: I0129 12:31:35.853473 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rv6v4\" (UniqueName: \"kubernetes.io/projected/e4000d05-91fb-44b6-a38f-afa7b616dd8b-kube-api-access-rv6v4\") pod \"swift-ring-rebalance-debug-c5khd\" (UID: \"e4000d05-91fb-44b6-a38f-afa7b616dd8b\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-c5khd" Jan 29 12:31:35 crc kubenswrapper[4753]: I0129 12:31:35.853502 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/e4000d05-91fb-44b6-a38f-afa7b616dd8b-swiftconf\") pod \"swift-ring-rebalance-debug-c5khd\" (UID: \"e4000d05-91fb-44b6-a38f-afa7b616dd8b\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-c5khd" Jan 29 12:31:35 crc kubenswrapper[4753]: I0129 12:31:35.853748 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e4000d05-91fb-44b6-a38f-afa7b616dd8b-scripts\") pod \"swift-ring-rebalance-debug-c5khd\" (UID: \"e4000d05-91fb-44b6-a38f-afa7b616dd8b\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-c5khd" Jan 29 12:31:35 crc kubenswrapper[4753]: I0129 12:31:35.854087 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/e4000d05-91fb-44b6-a38f-afa7b616dd8b-dispersionconf\") pod \"swift-ring-rebalance-debug-c5khd\" (UID: \"e4000d05-91fb-44b6-a38f-afa7b616dd8b\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-c5khd" Jan 29 12:31:35 crc kubenswrapper[4753]: I0129 12:31:35.854148 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/e4000d05-91fb-44b6-a38f-afa7b616dd8b-ring-data-devices\") pod \"swift-ring-rebalance-debug-c5khd\" (UID: \"e4000d05-91fb-44b6-a38f-afa7b616dd8b\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-c5khd" Jan 29 12:31:35 crc kubenswrapper[4753]: I0129 12:31:35.905870 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8bc741ab-9534-404d-a356-e4cfc92ddcd5" path="/var/lib/kubelet/pods/8bc741ab-9534-404d-a356-e4cfc92ddcd5/volumes" Jan 29 12:31:35 crc kubenswrapper[4753]: I0129 12:31:35.955143 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" 
(UniqueName: \"kubernetes.io/secret/e4000d05-91fb-44b6-a38f-afa7b616dd8b-dispersionconf\") pod \"swift-ring-rebalance-debug-c5khd\" (UID: \"e4000d05-91fb-44b6-a38f-afa7b616dd8b\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-c5khd" Jan 29 12:31:35 crc kubenswrapper[4753]: I0129 12:31:35.955222 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/e4000d05-91fb-44b6-a38f-afa7b616dd8b-ring-data-devices\") pod \"swift-ring-rebalance-debug-c5khd\" (UID: \"e4000d05-91fb-44b6-a38f-afa7b616dd8b\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-c5khd" Jan 29 12:31:35 crc kubenswrapper[4753]: I0129 12:31:35.955419 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/e4000d05-91fb-44b6-a38f-afa7b616dd8b-etc-swift\") pod \"swift-ring-rebalance-debug-c5khd\" (UID: \"e4000d05-91fb-44b6-a38f-afa7b616dd8b\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-c5khd" Jan 29 12:31:35 crc kubenswrapper[4753]: I0129 12:31:35.955460 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rv6v4\" (UniqueName: \"kubernetes.io/projected/e4000d05-91fb-44b6-a38f-afa7b616dd8b-kube-api-access-rv6v4\") pod \"swift-ring-rebalance-debug-c5khd\" (UID: \"e4000d05-91fb-44b6-a38f-afa7b616dd8b\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-c5khd" Jan 29 12:31:35 crc kubenswrapper[4753]: I0129 12:31:35.955499 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/e4000d05-91fb-44b6-a38f-afa7b616dd8b-swiftconf\") pod \"swift-ring-rebalance-debug-c5khd\" (UID: \"e4000d05-91fb-44b6-a38f-afa7b616dd8b\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-c5khd" Jan 29 12:31:35 crc kubenswrapper[4753]: I0129 12:31:35.955553 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e4000d05-91fb-44b6-a38f-afa7b616dd8b-scripts\") pod \"swift-ring-rebalance-debug-c5khd\" (UID: \"e4000d05-91fb-44b6-a38f-afa7b616dd8b\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-c5khd" Jan 29 12:31:35 crc kubenswrapper[4753]: I0129 12:31:35.957259 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/e4000d05-91fb-44b6-a38f-afa7b616dd8b-ring-data-devices\") pod \"swift-ring-rebalance-debug-c5khd\" (UID: \"e4000d05-91fb-44b6-a38f-afa7b616dd8b\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-c5khd" Jan 29 12:31:35 crc kubenswrapper[4753]: I0129 12:31:35.957311 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/e4000d05-91fb-44b6-a38f-afa7b616dd8b-etc-swift\") pod \"swift-ring-rebalance-debug-c5khd\" (UID: \"e4000d05-91fb-44b6-a38f-afa7b616dd8b\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-c5khd" Jan 29 12:31:35 crc kubenswrapper[4753]: I0129 12:31:35.957845 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e4000d05-91fb-44b6-a38f-afa7b616dd8b-scripts\") pod \"swift-ring-rebalance-debug-c5khd\" (UID: \"e4000d05-91fb-44b6-a38f-afa7b616dd8b\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-c5khd" Jan 29 12:31:35 crc kubenswrapper[4753]: I0129 12:31:35.962563 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/e4000d05-91fb-44b6-a38f-afa7b616dd8b-swiftconf\") pod \"swift-ring-rebalance-debug-c5khd\" (UID: \"e4000d05-91fb-44b6-a38f-afa7b616dd8b\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-c5khd" Jan 29 12:31:35 crc kubenswrapper[4753]: I0129 12:31:35.968456 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/e4000d05-91fb-44b6-a38f-afa7b616dd8b-dispersionconf\") pod \"swift-ring-rebalance-debug-c5khd\" (UID: \"e4000d05-91fb-44b6-a38f-afa7b616dd8b\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-c5khd" Jan 29 12:31:35 crc kubenswrapper[4753]: I0129 12:31:35.978453 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rv6v4\" (UniqueName: \"kubernetes.io/projected/e4000d05-91fb-44b6-a38f-afa7b616dd8b-kube-api-access-rv6v4\") pod \"swift-ring-rebalance-debug-c5khd\" (UID: \"e4000d05-91fb-44b6-a38f-afa7b616dd8b\") " pod="swift-kuttl-tests/swift-ring-rebalance-debug-c5khd" Jan 29 12:31:36 crc kubenswrapper[4753]: I0129 12:31:36.078217 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-debug-c5khd" Jan 29 12:31:36 crc kubenswrapper[4753]: I0129 12:31:36.815173 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-debug-c5khd"] Jan 29 12:31:36 crc kubenswrapper[4753]: W0129 12:31:36.825106 4753 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode4000d05_91fb_44b6_a38f_afa7b616dd8b.slice/crio-a4668ed9215395f799b9e79e91f3aa2e4a900864f8b9b5d99317ee478d219072 WatchSource:0}: Error finding container a4668ed9215395f799b9e79e91f3aa2e4a900864f8b9b5d99317ee478d219072: Status 404 returned error can't find the container with id a4668ed9215395f799b9e79e91f3aa2e4a900864f8b9b5d99317ee478d219072 Jan 29 12:31:37 crc kubenswrapper[4753]: I0129 12:31:37.074050 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-ring-rebalance-debug-c5khd" event={"ID":"e4000d05-91fb-44b6-a38f-afa7b616dd8b","Type":"ContainerStarted","Data":"afa17d1f3e7e3bc437a4bb1ab0243ed81cbe29753991167d1e4e278f45c29ee5"} Jan 29 12:31:37 crc kubenswrapper[4753]: I0129 12:31:37.074462 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-ring-rebalance-debug-c5khd" event={"ID":"e4000d05-91fb-44b6-a38f-afa7b616dd8b","Type":"ContainerStarted","Data":"a4668ed9215395f799b9e79e91f3aa2e4a900864f8b9b5d99317ee478d219072"} Jan 29 12:31:37 crc kubenswrapper[4753]: I0129 12:31:37.098433 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="swift-kuttl-tests/swift-ring-rebalance-debug-c5khd" podStartSLOduration=2.098395279 podStartE2EDuration="2.098395279s" podCreationTimestamp="2026-01-29 12:31:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:31:37.096103624 +0000 UTC m=+1511.348185109" watchObservedRunningTime="2026-01-29 12:31:37.098395279 +0000 UTC m=+1511.350476734" Jan 29 12:31:39 crc kubenswrapper[4753]: I0129 12:31:39.098307 4753 generic.go:334] "Generic (PLEG): container finished" podID="e4000d05-91fb-44b6-a38f-afa7b616dd8b" containerID="afa17d1f3e7e3bc437a4bb1ab0243ed81cbe29753991167d1e4e278f45c29ee5" exitCode=0 Jan 29 12:31:39 crc kubenswrapper[4753]: I0129 12:31:39.098665 4753 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-ring-rebalance-debug-c5khd" event={"ID":"e4000d05-91fb-44b6-a38f-afa7b616dd8b","Type":"ContainerDied","Data":"afa17d1f3e7e3bc437a4bb1ab0243ed81cbe29753991167d1e4e278f45c29ee5"} Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.396570 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-debug-c5khd" Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.436156 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-debug-c5khd"] Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.446112 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-debug-c5khd"] Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.542721 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/swift-storage-1"] Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.543400 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-1" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="account-server" containerID="cri-o://f0eddfb27e0b1ca03ff86905f0d1b81d674be9160c615a4f016f519d650869e7" gracePeriod=30 Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.543482 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-1" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="container-replicator" containerID="cri-o://512e69e3786e84e63a587d8d9745a6408e329502de45c30a1be8a2813fce175e" gracePeriod=30 Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.543473 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-1" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="container-auditor" containerID="cri-o://3d01ab550711d3f6ed3858797bfec7a7069bc75766d3c545e26de6dea09f5fb8" gracePeriod=30 Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.543523 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-1" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="object-updater" containerID="cri-o://16c189b240c3b6af0b5143c94f27ad8f278cf55c71fde6ff59b1123203d274ec" gracePeriod=30 Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.543563 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-1" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="container-updater" containerID="cri-o://0aec20004c67a50d19a5c8d1cf7ed9018b4b95c39a5978455971cdd8179addd1" gracePeriod=30 Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.543584 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-1" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="object-auditor" containerID="cri-o://d29a9d10061279a2805ca6403127e650b1bbfe36a4163df4e079fae54c5a5970" gracePeriod=30 Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.543455 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-1" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="object-server" containerID="cri-o://7375ec7e5486a51f1304b60d7e06caa5a53e836b083d9d627059439991053ffd" gracePeriod=30 Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.543614 4753 
kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-1" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="account-replicator" containerID="cri-o://54791b5e4f8e371f9007a17143cbc5838793bd8d3e1ce65c2822161e12d6146c" gracePeriod=30 Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.543602 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-1" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="account-auditor" containerID="cri-o://b028c59c7810639143318f825119fe54eacc1a661cb573dc2455ed7e2ba850ed" gracePeriod=30 Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.543640 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-1" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="swift-recon-cron" containerID="cri-o://194bd65bd1293b044532e09dbd55bdc880c52603b4ee58046654673e07e25e17" gracePeriod=30 Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.543618 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-1" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="object-replicator" containerID="cri-o://4f9f3bd4b5dcda78683a4fd9f6169c0ba4cacc4f6a05deb5fb5b67519e1959ef" gracePeriod=30 Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.543453 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-1" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="container-server" containerID="cri-o://49988160277c5cc45dd8900eb16d03199ec0c4ffa40e5904a5dbb7b9d2078f17" gracePeriod=30 Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.543586 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-1" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="account-reaper" containerID="cri-o://6d62737aaefa0ae329ab69794e4a09fdbf4a1956c0ade81f0a19a89a0f563158" gracePeriod=30 Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.543679 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-1" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="rsync" containerID="cri-o://e93a1c2eebca268c75cadde6e248adb908e05fb31946984b83eff825afbd1777" gracePeriod=30 Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.543592 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-1" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="object-expirer" containerID="cri-o://e0df4d24defb12bbb6148384cb20d00499a3423ed6132f66831d8d0399b66e27" gracePeriod=30 Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.569133 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/swift-storage-0"] Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.570468 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="object-server" containerID="cri-o://fea50e28f0174fd91362c9abb3b729cc30c5f58c114ab482b2127002cf3c630b" gracePeriod=30 Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.571126 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" 
containerName="swift-recon-cron" containerID="cri-o://b8020c59d568fbfdd9bf91a35ad05830bd19b500dc075b3d4bc3eda195246fab" gracePeriod=30 Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.571207 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="rsync" containerID="cri-o://2aa766e3f776a05fb581793f3b320cd526d9395e8e328f981b1cb5007225f32f" gracePeriod=30 Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.571353 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="object-expirer" containerID="cri-o://ae14f26fca5ecff409fc265c6b8752e73c7ecc71d20ff281e328fa1fc8e808fd" gracePeriod=30 Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.571417 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="object-updater" containerID="cri-o://da95de997017674f5f725d2434c76b898c8454d351d2ae0c048009fecac2a70b" gracePeriod=30 Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.571466 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="object-auditor" containerID="cri-o://e1480ad953e1453491bfbacc844a038d09fdd5a675fd4ec79be9f3f750ee72ae" gracePeriod=30 Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.571529 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="object-replicator" containerID="cri-o://f2e02b2ab58bd62d93b270812546264b3e9210ccac10d6b3ed2c1080f1dba78d" gracePeriod=30 Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.571643 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="container-server" containerID="cri-o://0f829cdb3721a769b92e78700efd5425645fe4eed5ead53148401905c22bb46d" gracePeriod=30 Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.571703 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="container-replicator" containerID="cri-o://b75616dd1d2571d9087cbb56f8d01e12a0d244229467ec074a4010fb12da21f3" gracePeriod=30 Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.571712 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="container-auditor" containerID="cri-o://863f0dd428950d22ff5bb1d391e9d5b81ac1a1cbbe4e695d9602a641d07a1a22" gracePeriod=30 Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.571811 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="account-auditor" containerID="cri-o://4bf0d941158a40c1b8708f56a8bbbbd8f371ac2b901f4843bcdc628feb29b830" gracePeriod=30 Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.571878 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" 
podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="account-reaper" containerID="cri-o://1661471db32399586faa01821a0cd7e36b5025a57e9a8d4f08b6a0cdc8302d89" gracePeriod=30 Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.571887 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="container-updater" containerID="cri-o://21a68569c06ad75010210c795bc28381cfc8966ffbce36fdbe1e636ab5477089" gracePeriod=30 Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.571967 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="account-replicator" containerID="cri-o://26ced7e34171d6bfce760564034338cc137c0fec95e21a1eda4b638ce903778b" gracePeriod=30 Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.572035 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="account-server" containerID="cri-o://ebbaa821837a219076c7e5eaecb2900a7f98b03a800d03b39a05eb1bc69709ae" gracePeriod=30 Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.596351 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/e4000d05-91fb-44b6-a38f-afa7b616dd8b-etc-swift\") pod \"e4000d05-91fb-44b6-a38f-afa7b616dd8b\" (UID: \"e4000d05-91fb-44b6-a38f-afa7b616dd8b\") " Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.596539 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rv6v4\" (UniqueName: \"kubernetes.io/projected/e4000d05-91fb-44b6-a38f-afa7b616dd8b-kube-api-access-rv6v4\") pod \"e4000d05-91fb-44b6-a38f-afa7b616dd8b\" (UID: \"e4000d05-91fb-44b6-a38f-afa7b616dd8b\") " Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.596602 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/e4000d05-91fb-44b6-a38f-afa7b616dd8b-swiftconf\") pod \"e4000d05-91fb-44b6-a38f-afa7b616dd8b\" (UID: \"e4000d05-91fb-44b6-a38f-afa7b616dd8b\") " Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.596630 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/e4000d05-91fb-44b6-a38f-afa7b616dd8b-dispersionconf\") pod \"e4000d05-91fb-44b6-a38f-afa7b616dd8b\" (UID: \"e4000d05-91fb-44b6-a38f-afa7b616dd8b\") " Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.596663 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/e4000d05-91fb-44b6-a38f-afa7b616dd8b-ring-data-devices\") pod \"e4000d05-91fb-44b6-a38f-afa7b616dd8b\" (UID: \"e4000d05-91fb-44b6-a38f-afa7b616dd8b\") " Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.596692 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e4000d05-91fb-44b6-a38f-afa7b616dd8b-scripts\") pod \"e4000d05-91fb-44b6-a38f-afa7b616dd8b\" (UID: \"e4000d05-91fb-44b6-a38f-afa7b616dd8b\") " Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.598579 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/empty-dir/e4000d05-91fb-44b6-a38f-afa7b616dd8b-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "e4000d05-91fb-44b6-a38f-afa7b616dd8b" (UID: "e4000d05-91fb-44b6-a38f-afa7b616dd8b"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.609709 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/swift-storage-2"] Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.610368 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-2" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="account-server" containerID="cri-o://1737e0ba3b3d084614c0a7a06cc57cad8d732ad8ed13e0c65e0965dfefef1d63" gracePeriod=30 Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.610859 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-2" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="swift-recon-cron" containerID="cri-o://0a2445c3569e08520140f26ebf5205ae7e804cd6709b85f2a1fb89ce86ea5674" gracePeriod=30 Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.610919 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-2" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="rsync" containerID="cri-o://cdf458faa8e41f1db5e8106107119c792aea631dcda870cad66d95e8a74440aa" gracePeriod=30 Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.610961 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-2" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="object-expirer" containerID="cri-o://50d6ac3c69c7caed60fbf1e5bb69f1a917f89479ed3e932b76121dae17629596" gracePeriod=30 Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.611020 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-2" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="object-updater" containerID="cri-o://afe9540d6c8487275d7527ca39a1a61852a5605ffc987d75def8883cdd9c9d91" gracePeriod=30 Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.611063 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-2" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="object-auditor" containerID="cri-o://c59dfabc0782c5e45570ebd81541d4892c4494e9a05b790d9d0166ca98e35355" gracePeriod=30 Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.611100 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-2" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="object-replicator" containerID="cri-o://b1ec4dfcf68549610147c8cd4d5995a51e66768570c5c1fdb817858b572a2aef" gracePeriod=30 Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.611138 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-2" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="object-server" containerID="cri-o://824a9ea0f9ab12597c31967bb66834f5b783df0905b01089fcbd3905b858c483" gracePeriod=30 Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.611174 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-2" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="container-updater" 
containerID="cri-o://d112d59361aa4821a24ce857e58ec6ead1b63c9892c956f350e18f52f9e10e65" gracePeriod=30 Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.611212 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-2" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="container-auditor" containerID="cri-o://3225ffbf14a2f46af26051f69cf072841186d28c18bf03982b6718349f799de8" gracePeriod=30 Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.611365 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-2" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="container-replicator" containerID="cri-o://3705c5801c3eba8ddc1d2890a6acdfcdf4c62e5da07c136d125988be1e360884" gracePeriod=30 Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.611412 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-2" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="container-server" containerID="cri-o://9f7afb748e128f506597d0e087b167fcba9224b8b7016f8ee496a4591b0e162c" gracePeriod=30 Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.611457 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-2" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="account-reaper" containerID="cri-o://31d6993374213dd3b7e564d0a4085cc39908df3ddaf6e54182f707a289ddab96" gracePeriod=30 Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.611500 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-2" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="account-auditor" containerID="cri-o://056e27f46aca1e39fcb6267b02e6b8748de9f6836da7bd55a0b5148163a59cdf" gracePeriod=30 Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.611596 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-2" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="account-replicator" containerID="cri-o://8ade60df808d454198934fc448217bcca19a3633d29a12c5a706cb873f806811" gracePeriod=30 Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.612207 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e4000d05-91fb-44b6-a38f-afa7b616dd8b-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "e4000d05-91fb-44b6-a38f-afa7b616dd8b" (UID: "e4000d05-91fb-44b6-a38f-afa7b616dd8b"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.637398 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e4000d05-91fb-44b6-a38f-afa7b616dd8b-kube-api-access-rv6v4" (OuterVolumeSpecName: "kube-api-access-rv6v4") pod "e4000d05-91fb-44b6-a38f-afa7b616dd8b" (UID: "e4000d05-91fb-44b6-a38f-afa7b616dd8b"). InnerVolumeSpecName "kube-api-access-rv6v4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.640527 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e4000d05-91fb-44b6-a38f-afa7b616dd8b-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "e4000d05-91fb-44b6-a38f-afa7b616dd8b" (UID: "e4000d05-91fb-44b6-a38f-afa7b616dd8b"). InnerVolumeSpecName "swiftconf". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.646496 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-pckfk"] Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.652711 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e4000d05-91fb-44b6-a38f-afa7b616dd8b-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "e4000d05-91fb-44b6-a38f-afa7b616dd8b" (UID: "e4000d05-91fb-44b6-a38f-afa7b616dd8b"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.682348 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-pckfk"] Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.688911 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e4000d05-91fb-44b6-a38f-afa7b616dd8b-scripts" (OuterVolumeSpecName: "scripts") pod "e4000d05-91fb-44b6-a38f-afa7b616dd8b" (UID: "e4000d05-91fb-44b6-a38f-afa7b616dd8b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.700737 4753 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/e4000d05-91fb-44b6-a38f-afa7b616dd8b-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.700765 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rv6v4\" (UniqueName: \"kubernetes.io/projected/e4000d05-91fb-44b6-a38f-afa7b616dd8b-kube-api-access-rv6v4\") on node \"crc\" DevicePath \"\"" Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.700777 4753 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/e4000d05-91fb-44b6-a38f-afa7b616dd8b-swiftconf\") on node \"crc\" DevicePath \"\"" Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.700786 4753 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/e4000d05-91fb-44b6-a38f-afa7b616dd8b-dispersionconf\") on node \"crc\" DevicePath \"\"" Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.700796 4753 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/e4000d05-91fb-44b6-a38f-afa7b616dd8b-ring-data-devices\") on node \"crc\" DevicePath \"\"" Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.700806 4753 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e4000d05-91fb-44b6-a38f-afa7b616dd8b-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.713569 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/swift-proxy-6bc96d68cf-kbrl4"] Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.713882 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-kbrl4" podUID="cec6584a-fcc6-4b1d-8516-c2e6b2194048" containerName="proxy-httpd" containerID="cri-o://bdc2ce23aa0b4c724f557767c41572887c03d3fefc7a47884d3282150817b6d9" gracePeriod=30 Jan 29 12:31:40 crc kubenswrapper[4753]: I0129 12:31:40.714018 4753 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-kbrl4" podUID="cec6584a-fcc6-4b1d-8516-c2e6b2194048" containerName="proxy-server" containerID="cri-o://3c03eaf094d75e935a3ac244d63b044ef319a92959b6f9fd416b8a3dbdbcc0da" gracePeriod=30 Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.125434 4753 generic.go:334] "Generic (PLEG): container finished" podID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerID="e93a1c2eebca268c75cadde6e248adb908e05fb31946984b83eff825afbd1777" exitCode=0 Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.125476 4753 generic.go:334] "Generic (PLEG): container finished" podID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerID="e0df4d24defb12bbb6148384cb20d00499a3423ed6132f66831d8d0399b66e27" exitCode=0 Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.125488 4753 generic.go:334] "Generic (PLEG): container finished" podID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerID="16c189b240c3b6af0b5143c94f27ad8f278cf55c71fde6ff59b1123203d274ec" exitCode=0 Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.125498 4753 generic.go:334] "Generic (PLEG): container finished" podID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerID="d29a9d10061279a2805ca6403127e650b1bbfe36a4163df4e079fae54c5a5970" exitCode=0 Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.125509 4753 generic.go:334] "Generic (PLEG): container finished" podID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerID="4f9f3bd4b5dcda78683a4fd9f6169c0ba4cacc4f6a05deb5fb5b67519e1959ef" exitCode=0 Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.125497 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"cdf821d7-7cec-4c33-a59e-1c6493fd7281","Type":"ContainerDied","Data":"e93a1c2eebca268c75cadde6e248adb908e05fb31946984b83eff825afbd1777"} Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.125518 4753 generic.go:334] "Generic (PLEG): container finished" podID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerID="0aec20004c67a50d19a5c8d1cf7ed9018b4b95c39a5978455971cdd8179addd1" exitCode=0 Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.125527 4753 generic.go:334] "Generic (PLEG): container finished" podID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerID="3d01ab550711d3f6ed3858797bfec7a7069bc75766d3c545e26de6dea09f5fb8" exitCode=0 Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.125535 4753 generic.go:334] "Generic (PLEG): container finished" podID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerID="512e69e3786e84e63a587d8d9745a6408e329502de45c30a1be8a2813fce175e" exitCode=0 Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.125545 4753 generic.go:334] "Generic (PLEG): container finished" podID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerID="6d62737aaefa0ae329ab69794e4a09fdbf4a1956c0ade81f0a19a89a0f563158" exitCode=0 Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.125554 4753 generic.go:334] "Generic (PLEG): container finished" podID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerID="b028c59c7810639143318f825119fe54eacc1a661cb573dc2455ed7e2ba850ed" exitCode=0 Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.125563 4753 generic.go:334] "Generic (PLEG): container finished" podID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerID="54791b5e4f8e371f9007a17143cbc5838793bd8d3e1ce65c2822161e12d6146c" exitCode=0 Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.125546 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" 
event={"ID":"cdf821d7-7cec-4c33-a59e-1c6493fd7281","Type":"ContainerDied","Data":"e0df4d24defb12bbb6148384cb20d00499a3423ed6132f66831d8d0399b66e27"} Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.125625 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"cdf821d7-7cec-4c33-a59e-1c6493fd7281","Type":"ContainerDied","Data":"16c189b240c3b6af0b5143c94f27ad8f278cf55c71fde6ff59b1123203d274ec"} Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.125640 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"cdf821d7-7cec-4c33-a59e-1c6493fd7281","Type":"ContainerDied","Data":"d29a9d10061279a2805ca6403127e650b1bbfe36a4163df4e079fae54c5a5970"} Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.125654 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"cdf821d7-7cec-4c33-a59e-1c6493fd7281","Type":"ContainerDied","Data":"4f9f3bd4b5dcda78683a4fd9f6169c0ba4cacc4f6a05deb5fb5b67519e1959ef"} Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.125667 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"cdf821d7-7cec-4c33-a59e-1c6493fd7281","Type":"ContainerDied","Data":"0aec20004c67a50d19a5c8d1cf7ed9018b4b95c39a5978455971cdd8179addd1"} Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.125681 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"cdf821d7-7cec-4c33-a59e-1c6493fd7281","Type":"ContainerDied","Data":"3d01ab550711d3f6ed3858797bfec7a7069bc75766d3c545e26de6dea09f5fb8"} Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.125691 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"cdf821d7-7cec-4c33-a59e-1c6493fd7281","Type":"ContainerDied","Data":"512e69e3786e84e63a587d8d9745a6408e329502de45c30a1be8a2813fce175e"} Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.125703 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"cdf821d7-7cec-4c33-a59e-1c6493fd7281","Type":"ContainerDied","Data":"6d62737aaefa0ae329ab69794e4a09fdbf4a1956c0ade81f0a19a89a0f563158"} Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.125716 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"cdf821d7-7cec-4c33-a59e-1c6493fd7281","Type":"ContainerDied","Data":"b028c59c7810639143318f825119fe54eacc1a661cb573dc2455ed7e2ba850ed"} Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.125727 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"cdf821d7-7cec-4c33-a59e-1c6493fd7281","Type":"ContainerDied","Data":"54791b5e4f8e371f9007a17143cbc5838793bd8d3e1ce65c2822161e12d6146c"} Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.127980 4753 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a4668ed9215395f799b9e79e91f3aa2e4a900864f8b9b5d99317ee478d219072" Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.128008 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-debug-c5khd" Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.130141 4753 generic.go:334] "Generic (PLEG): container finished" podID="cec6584a-fcc6-4b1d-8516-c2e6b2194048" containerID="bdc2ce23aa0b4c724f557767c41572887c03d3fefc7a47884d3282150817b6d9" exitCode=0 Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.130278 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-kbrl4" event={"ID":"cec6584a-fcc6-4b1d-8516-c2e6b2194048","Type":"ContainerDied","Data":"bdc2ce23aa0b4c724f557767c41572887c03d3fefc7a47884d3282150817b6d9"} Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.143770 4753 generic.go:334] "Generic (PLEG): container finished" podID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerID="50d6ac3c69c7caed60fbf1e5bb69f1a917f89479ed3e932b76121dae17629596" exitCode=0 Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.143813 4753 generic.go:334] "Generic (PLEG): container finished" podID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerID="afe9540d6c8487275d7527ca39a1a61852a5605ffc987d75def8883cdd9c9d91" exitCode=0 Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.143823 4753 generic.go:334] "Generic (PLEG): container finished" podID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerID="c59dfabc0782c5e45570ebd81541d4892c4494e9a05b790d9d0166ca98e35355" exitCode=0 Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.143833 4753 generic.go:334] "Generic (PLEG): container finished" podID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerID="b1ec4dfcf68549610147c8cd4d5995a51e66768570c5c1fdb817858b572a2aef" exitCode=0 Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.143844 4753 generic.go:334] "Generic (PLEG): container finished" podID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerID="d112d59361aa4821a24ce857e58ec6ead1b63c9892c956f350e18f52f9e10e65" exitCode=0 Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.143853 4753 generic.go:334] "Generic (PLEG): container finished" podID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerID="3225ffbf14a2f46af26051f69cf072841186d28c18bf03982b6718349f799de8" exitCode=0 Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.143861 4753 generic.go:334] "Generic (PLEG): container finished" podID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerID="3705c5801c3eba8ddc1d2890a6acdfcdf4c62e5da07c136d125988be1e360884" exitCode=0 Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.143869 4753 generic.go:334] "Generic (PLEG): container finished" podID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerID="31d6993374213dd3b7e564d0a4085cc39908df3ddaf6e54182f707a289ddab96" exitCode=0 Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.143877 4753 generic.go:334] "Generic (PLEG): container finished" podID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerID="056e27f46aca1e39fcb6267b02e6b8748de9f6836da7bd55a0b5148163a59cdf" exitCode=0 Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.143885 4753 generic.go:334] "Generic (PLEG): container finished" podID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerID="8ade60df808d454198934fc448217bcca19a3633d29a12c5a706cb873f806811" exitCode=0 Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.143939 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"5f186cee-89fe-4c25-b227-628ffcb2a98f","Type":"ContainerDied","Data":"50d6ac3c69c7caed60fbf1e5bb69f1a917f89479ed3e932b76121dae17629596"} Jan 29 12:31:41 crc 
kubenswrapper[4753]: I0129 12:31:41.143976 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"5f186cee-89fe-4c25-b227-628ffcb2a98f","Type":"ContainerDied","Data":"afe9540d6c8487275d7527ca39a1a61852a5605ffc987d75def8883cdd9c9d91"} Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.143992 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"5f186cee-89fe-4c25-b227-628ffcb2a98f","Type":"ContainerDied","Data":"c59dfabc0782c5e45570ebd81541d4892c4494e9a05b790d9d0166ca98e35355"} Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.144003 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"5f186cee-89fe-4c25-b227-628ffcb2a98f","Type":"ContainerDied","Data":"b1ec4dfcf68549610147c8cd4d5995a51e66768570c5c1fdb817858b572a2aef"} Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.144014 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"5f186cee-89fe-4c25-b227-628ffcb2a98f","Type":"ContainerDied","Data":"d112d59361aa4821a24ce857e58ec6ead1b63c9892c956f350e18f52f9e10e65"} Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.144025 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"5f186cee-89fe-4c25-b227-628ffcb2a98f","Type":"ContainerDied","Data":"3225ffbf14a2f46af26051f69cf072841186d28c18bf03982b6718349f799de8"} Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.144037 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"5f186cee-89fe-4c25-b227-628ffcb2a98f","Type":"ContainerDied","Data":"3705c5801c3eba8ddc1d2890a6acdfcdf4c62e5da07c136d125988be1e360884"} Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.144047 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"5f186cee-89fe-4c25-b227-628ffcb2a98f","Type":"ContainerDied","Data":"31d6993374213dd3b7e564d0a4085cc39908df3ddaf6e54182f707a289ddab96"} Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.144058 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"5f186cee-89fe-4c25-b227-628ffcb2a98f","Type":"ContainerDied","Data":"056e27f46aca1e39fcb6267b02e6b8748de9f6836da7bd55a0b5148163a59cdf"} Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.144068 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"5f186cee-89fe-4c25-b227-628ffcb2a98f","Type":"ContainerDied","Data":"8ade60df808d454198934fc448217bcca19a3633d29a12c5a706cb873f806811"} Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.164086 4753 generic.go:334] "Generic (PLEG): container finished" podID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerID="2aa766e3f776a05fb581793f3b320cd526d9395e8e328f981b1cb5007225f32f" exitCode=0 Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.164120 4753 generic.go:334] "Generic (PLEG): container finished" podID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerID="ae14f26fca5ecff409fc265c6b8752e73c7ecc71d20ff281e328fa1fc8e808fd" exitCode=0 Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.164129 4753 generic.go:334] "Generic (PLEG): container finished" podID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerID="da95de997017674f5f725d2434c76b898c8454d351d2ae0c048009fecac2a70b" exitCode=0 Jan 29 12:31:41 crc kubenswrapper[4753]: 
I0129 12:31:41.164137 4753 generic.go:334] "Generic (PLEG): container finished" podID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerID="e1480ad953e1453491bfbacc844a038d09fdd5a675fd4ec79be9f3f750ee72ae" exitCode=0 Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.164144 4753 generic.go:334] "Generic (PLEG): container finished" podID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerID="f2e02b2ab58bd62d93b270812546264b3e9210ccac10d6b3ed2c1080f1dba78d" exitCode=0 Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.164150 4753 generic.go:334] "Generic (PLEG): container finished" podID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerID="21a68569c06ad75010210c795bc28381cfc8966ffbce36fdbe1e636ab5477089" exitCode=0 Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.164159 4753 generic.go:334] "Generic (PLEG): container finished" podID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerID="863f0dd428950d22ff5bb1d391e9d5b81ac1a1cbbe4e695d9602a641d07a1a22" exitCode=0 Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.164180 4753 generic.go:334] "Generic (PLEG): container finished" podID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerID="b75616dd1d2571d9087cbb56f8d01e12a0d244229467ec074a4010fb12da21f3" exitCode=0 Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.164188 4753 generic.go:334] "Generic (PLEG): container finished" podID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerID="1661471db32399586faa01821a0cd7e36b5025a57e9a8d4f08b6a0cdc8302d89" exitCode=0 Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.164194 4753 generic.go:334] "Generic (PLEG): container finished" podID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerID="4bf0d941158a40c1b8708f56a8bbbbd8f371ac2b901f4843bcdc628feb29b830" exitCode=0 Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.164200 4753 generic.go:334] "Generic (PLEG): container finished" podID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerID="26ced7e34171d6bfce760564034338cc137c0fec95e21a1eda4b638ce903778b" exitCode=0 Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.164270 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1","Type":"ContainerDied","Data":"2aa766e3f776a05fb581793f3b320cd526d9395e8e328f981b1cb5007225f32f"} Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.164301 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1","Type":"ContainerDied","Data":"ae14f26fca5ecff409fc265c6b8752e73c7ecc71d20ff281e328fa1fc8e808fd"} Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.164312 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1","Type":"ContainerDied","Data":"da95de997017674f5f725d2434c76b898c8454d351d2ae0c048009fecac2a70b"} Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.164320 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1","Type":"ContainerDied","Data":"e1480ad953e1453491bfbacc844a038d09fdd5a675fd4ec79be9f3f750ee72ae"} Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.164329 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1","Type":"ContainerDied","Data":"f2e02b2ab58bd62d93b270812546264b3e9210ccac10d6b3ed2c1080f1dba78d"} Jan 29 
12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.164337 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1","Type":"ContainerDied","Data":"21a68569c06ad75010210c795bc28381cfc8966ffbce36fdbe1e636ab5477089"} Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.164346 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1","Type":"ContainerDied","Data":"863f0dd428950d22ff5bb1d391e9d5b81ac1a1cbbe4e695d9602a641d07a1a22"} Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.164354 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1","Type":"ContainerDied","Data":"b75616dd1d2571d9087cbb56f8d01e12a0d244229467ec074a4010fb12da21f3"} Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.164364 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1","Type":"ContainerDied","Data":"1661471db32399586faa01821a0cd7e36b5025a57e9a8d4f08b6a0cdc8302d89"} Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.164373 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1","Type":"ContainerDied","Data":"4bf0d941158a40c1b8708f56a8bbbbd8f371ac2b901f4843bcdc628feb29b830"} Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.164381 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1","Type":"ContainerDied","Data":"26ced7e34171d6bfce760564034338cc137c0fec95e21a1eda4b638ce903778b"} Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.897433 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e4000d05-91fb-44b6-a38f-afa7b616dd8b" path="/var/lib/kubelet/pods/e4000d05-91fb-44b6-a38f-afa7b616dd8b/volumes" Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.898345 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f284a293-aa84-46cd-bb59-b2d8e3d1762c" path="/var/lib/kubelet/pods/f284a293-aa84-46cd-bb59-b2d8e3d1762c/volumes" Jan 29 12:31:41 crc kubenswrapper[4753]: I0129 12:31:41.972726 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-kbrl4" Jan 29 12:31:42 crc kubenswrapper[4753]: I0129 12:31:42.122525 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cec6584a-fcc6-4b1d-8516-c2e6b2194048-log-httpd\") pod \"cec6584a-fcc6-4b1d-8516-c2e6b2194048\" (UID: \"cec6584a-fcc6-4b1d-8516-c2e6b2194048\") " Jan 29 12:31:42 crc kubenswrapper[4753]: I0129 12:31:42.122652 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/cec6584a-fcc6-4b1d-8516-c2e6b2194048-etc-swift\") pod \"cec6584a-fcc6-4b1d-8516-c2e6b2194048\" (UID: \"cec6584a-fcc6-4b1d-8516-c2e6b2194048\") " Jan 29 12:31:42 crc kubenswrapper[4753]: I0129 12:31:42.122695 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cec6584a-fcc6-4b1d-8516-c2e6b2194048-run-httpd\") pod \"cec6584a-fcc6-4b1d-8516-c2e6b2194048\" (UID: \"cec6584a-fcc6-4b1d-8516-c2e6b2194048\") " Jan 29 12:31:42 crc kubenswrapper[4753]: I0129 12:31:42.122780 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pxf6k\" (UniqueName: \"kubernetes.io/projected/cec6584a-fcc6-4b1d-8516-c2e6b2194048-kube-api-access-pxf6k\") pod \"cec6584a-fcc6-4b1d-8516-c2e6b2194048\" (UID: \"cec6584a-fcc6-4b1d-8516-c2e6b2194048\") " Jan 29 12:31:42 crc kubenswrapper[4753]: I0129 12:31:42.122815 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cec6584a-fcc6-4b1d-8516-c2e6b2194048-config-data\") pod \"cec6584a-fcc6-4b1d-8516-c2e6b2194048\" (UID: \"cec6584a-fcc6-4b1d-8516-c2e6b2194048\") " Jan 29 12:31:42 crc kubenswrapper[4753]: I0129 12:31:42.123076 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cec6584a-fcc6-4b1d-8516-c2e6b2194048-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "cec6584a-fcc6-4b1d-8516-c2e6b2194048" (UID: "cec6584a-fcc6-4b1d-8516-c2e6b2194048"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:31:42 crc kubenswrapper[4753]: I0129 12:31:42.123198 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cec6584a-fcc6-4b1d-8516-c2e6b2194048-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "cec6584a-fcc6-4b1d-8516-c2e6b2194048" (UID: "cec6584a-fcc6-4b1d-8516-c2e6b2194048"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:31:42 crc kubenswrapper[4753]: I0129 12:31:42.123395 4753 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cec6584a-fcc6-4b1d-8516-c2e6b2194048-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 12:31:42 crc kubenswrapper[4753]: I0129 12:31:42.123408 4753 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cec6584a-fcc6-4b1d-8516-c2e6b2194048-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 12:31:42 crc kubenswrapper[4753]: I0129 12:31:42.127192 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cec6584a-fcc6-4b1d-8516-c2e6b2194048-kube-api-access-pxf6k" (OuterVolumeSpecName: "kube-api-access-pxf6k") pod "cec6584a-fcc6-4b1d-8516-c2e6b2194048" (UID: "cec6584a-fcc6-4b1d-8516-c2e6b2194048"). 
InnerVolumeSpecName "kube-api-access-pxf6k". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:31:42 crc kubenswrapper[4753]: I0129 12:31:42.127389 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cec6584a-fcc6-4b1d-8516-c2e6b2194048-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "cec6584a-fcc6-4b1d-8516-c2e6b2194048" (UID: "cec6584a-fcc6-4b1d-8516-c2e6b2194048"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:31:42 crc kubenswrapper[4753]: I0129 12:31:42.157155 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cec6584a-fcc6-4b1d-8516-c2e6b2194048-config-data" (OuterVolumeSpecName: "config-data") pod "cec6584a-fcc6-4b1d-8516-c2e6b2194048" (UID: "cec6584a-fcc6-4b1d-8516-c2e6b2194048"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:31:42 crc kubenswrapper[4753]: I0129 12:31:42.182114 4753 generic.go:334] "Generic (PLEG): container finished" podID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerID="7375ec7e5486a51f1304b60d7e06caa5a53e836b083d9d627059439991053ffd" exitCode=0 Jan 29 12:31:42 crc kubenswrapper[4753]: I0129 12:31:42.182160 4753 generic.go:334] "Generic (PLEG): container finished" podID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerID="49988160277c5cc45dd8900eb16d03199ec0c4ffa40e5904a5dbb7b9d2078f17" exitCode=0 Jan 29 12:31:42 crc kubenswrapper[4753]: I0129 12:31:42.182171 4753 generic.go:334] "Generic (PLEG): container finished" podID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerID="f0eddfb27e0b1ca03ff86905f0d1b81d674be9160c615a4f016f519d650869e7" exitCode=0 Jan 29 12:31:42 crc kubenswrapper[4753]: I0129 12:31:42.182173 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"cdf821d7-7cec-4c33-a59e-1c6493fd7281","Type":"ContainerDied","Data":"7375ec7e5486a51f1304b60d7e06caa5a53e836b083d9d627059439991053ffd"} Jan 29 12:31:42 crc kubenswrapper[4753]: I0129 12:31:42.182221 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"cdf821d7-7cec-4c33-a59e-1c6493fd7281","Type":"ContainerDied","Data":"49988160277c5cc45dd8900eb16d03199ec0c4ffa40e5904a5dbb7b9d2078f17"} Jan 29 12:31:42 crc kubenswrapper[4753]: I0129 12:31:42.182262 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"cdf821d7-7cec-4c33-a59e-1c6493fd7281","Type":"ContainerDied","Data":"f0eddfb27e0b1ca03ff86905f0d1b81d674be9160c615a4f016f519d650869e7"} Jan 29 12:31:42 crc kubenswrapper[4753]: I0129 12:31:42.185450 4753 generic.go:334] "Generic (PLEG): container finished" podID="cec6584a-fcc6-4b1d-8516-c2e6b2194048" containerID="3c03eaf094d75e935a3ac244d63b044ef319a92959b6f9fd416b8a3dbdbcc0da" exitCode=0 Jan 29 12:31:42 crc kubenswrapper[4753]: I0129 12:31:42.185509 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-kbrl4" Jan 29 12:31:42 crc kubenswrapper[4753]: I0129 12:31:42.185548 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-kbrl4" event={"ID":"cec6584a-fcc6-4b1d-8516-c2e6b2194048","Type":"ContainerDied","Data":"3c03eaf094d75e935a3ac244d63b044ef319a92959b6f9fd416b8a3dbdbcc0da"} Jan 29 12:31:42 crc kubenswrapper[4753]: I0129 12:31:42.185579 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-6bc96d68cf-kbrl4" event={"ID":"cec6584a-fcc6-4b1d-8516-c2e6b2194048","Type":"ContainerDied","Data":"447e090cd54397217cafbcf93cea61a90bcf39c3150f7863a7d006cb27b16131"} Jan 29 12:31:42 crc kubenswrapper[4753]: I0129 12:31:42.185641 4753 scope.go:117] "RemoveContainer" containerID="3c03eaf094d75e935a3ac244d63b044ef319a92959b6f9fd416b8a3dbdbcc0da" Jan 29 12:31:42 crc kubenswrapper[4753]: I0129 12:31:42.195826 4753 generic.go:334] "Generic (PLEG): container finished" podID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerID="cdf458faa8e41f1db5e8106107119c792aea631dcda870cad66d95e8a74440aa" exitCode=0 Jan 29 12:31:42 crc kubenswrapper[4753]: I0129 12:31:42.195863 4753 generic.go:334] "Generic (PLEG): container finished" podID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerID="824a9ea0f9ab12597c31967bb66834f5b783df0905b01089fcbd3905b858c483" exitCode=0 Jan 29 12:31:42 crc kubenswrapper[4753]: I0129 12:31:42.195874 4753 generic.go:334] "Generic (PLEG): container finished" podID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerID="9f7afb748e128f506597d0e087b167fcba9224b8b7016f8ee496a4591b0e162c" exitCode=0 Jan 29 12:31:42 crc kubenswrapper[4753]: I0129 12:31:42.195884 4753 generic.go:334] "Generic (PLEG): container finished" podID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerID="1737e0ba3b3d084614c0a7a06cc57cad8d732ad8ed13e0c65e0965dfefef1d63" exitCode=0 Jan 29 12:31:42 crc kubenswrapper[4753]: I0129 12:31:42.195934 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"5f186cee-89fe-4c25-b227-628ffcb2a98f","Type":"ContainerDied","Data":"cdf458faa8e41f1db5e8106107119c792aea631dcda870cad66d95e8a74440aa"} Jan 29 12:31:42 crc kubenswrapper[4753]: I0129 12:31:42.195967 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"5f186cee-89fe-4c25-b227-628ffcb2a98f","Type":"ContainerDied","Data":"824a9ea0f9ab12597c31967bb66834f5b783df0905b01089fcbd3905b858c483"} Jan 29 12:31:42 crc kubenswrapper[4753]: I0129 12:31:42.195980 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"5f186cee-89fe-4c25-b227-628ffcb2a98f","Type":"ContainerDied","Data":"9f7afb748e128f506597d0e087b167fcba9224b8b7016f8ee496a4591b0e162c"} Jan 29 12:31:42 crc kubenswrapper[4753]: I0129 12:31:42.195992 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"5f186cee-89fe-4c25-b227-628ffcb2a98f","Type":"ContainerDied","Data":"1737e0ba3b3d084614c0a7a06cc57cad8d732ad8ed13e0c65e0965dfefef1d63"} Jan 29 12:31:42 crc kubenswrapper[4753]: I0129 12:31:42.206450 4753 scope.go:117] "RemoveContainer" containerID="bdc2ce23aa0b4c724f557767c41572887c03d3fefc7a47884d3282150817b6d9" Jan 29 12:31:42 crc kubenswrapper[4753]: I0129 12:31:42.210468 4753 generic.go:334] "Generic (PLEG): container finished" podID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" 
containerID="fea50e28f0174fd91362c9abb3b729cc30c5f58c114ab482b2127002cf3c630b" exitCode=0 Jan 29 12:31:42 crc kubenswrapper[4753]: I0129 12:31:42.210499 4753 generic.go:334] "Generic (PLEG): container finished" podID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerID="0f829cdb3721a769b92e78700efd5425645fe4eed5ead53148401905c22bb46d" exitCode=0 Jan 29 12:31:42 crc kubenswrapper[4753]: I0129 12:31:42.210507 4753 generic.go:334] "Generic (PLEG): container finished" podID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerID="ebbaa821837a219076c7e5eaecb2900a7f98b03a800d03b39a05eb1bc69709ae" exitCode=0 Jan 29 12:31:42 crc kubenswrapper[4753]: I0129 12:31:42.210532 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1","Type":"ContainerDied","Data":"fea50e28f0174fd91362c9abb3b729cc30c5f58c114ab482b2127002cf3c630b"} Jan 29 12:31:42 crc kubenswrapper[4753]: I0129 12:31:42.210563 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1","Type":"ContainerDied","Data":"0f829cdb3721a769b92e78700efd5425645fe4eed5ead53148401905c22bb46d"} Jan 29 12:31:42 crc kubenswrapper[4753]: I0129 12:31:42.210573 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1","Type":"ContainerDied","Data":"ebbaa821837a219076c7e5eaecb2900a7f98b03a800d03b39a05eb1bc69709ae"} Jan 29 12:31:42 crc kubenswrapper[4753]: I0129 12:31:42.220600 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/swift-proxy-6bc96d68cf-kbrl4"] Jan 29 12:31:42 crc kubenswrapper[4753]: I0129 12:31:42.225481 4753 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/cec6584a-fcc6-4b1d-8516-c2e6b2194048-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 29 12:31:42 crc kubenswrapper[4753]: I0129 12:31:42.225528 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pxf6k\" (UniqueName: \"kubernetes.io/projected/cec6584a-fcc6-4b1d-8516-c2e6b2194048-kube-api-access-pxf6k\") on node \"crc\" DevicePath \"\"" Jan 29 12:31:42 crc kubenswrapper[4753]: I0129 12:31:42.225541 4753 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cec6584a-fcc6-4b1d-8516-c2e6b2194048-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 12:31:42 crc kubenswrapper[4753]: I0129 12:31:42.227158 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/swift-proxy-6bc96d68cf-kbrl4"] Jan 29 12:31:42 crc kubenswrapper[4753]: I0129 12:31:42.229763 4753 scope.go:117] "RemoveContainer" containerID="3c03eaf094d75e935a3ac244d63b044ef319a92959b6f9fd416b8a3dbdbcc0da" Jan 29 12:31:42 crc kubenswrapper[4753]: E0129 12:31:42.230367 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3c03eaf094d75e935a3ac244d63b044ef319a92959b6f9fd416b8a3dbdbcc0da\": container with ID starting with 3c03eaf094d75e935a3ac244d63b044ef319a92959b6f9fd416b8a3dbdbcc0da not found: ID does not exist" containerID="3c03eaf094d75e935a3ac244d63b044ef319a92959b6f9fd416b8a3dbdbcc0da" Jan 29 12:31:42 crc kubenswrapper[4753]: I0129 12:31:42.230408 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3c03eaf094d75e935a3ac244d63b044ef319a92959b6f9fd416b8a3dbdbcc0da"} err="failed 
to get container status \"3c03eaf094d75e935a3ac244d63b044ef319a92959b6f9fd416b8a3dbdbcc0da\": rpc error: code = NotFound desc = could not find container \"3c03eaf094d75e935a3ac244d63b044ef319a92959b6f9fd416b8a3dbdbcc0da\": container with ID starting with 3c03eaf094d75e935a3ac244d63b044ef319a92959b6f9fd416b8a3dbdbcc0da not found: ID does not exist" Jan 29 12:31:42 crc kubenswrapper[4753]: I0129 12:31:42.230436 4753 scope.go:117] "RemoveContainer" containerID="bdc2ce23aa0b4c724f557767c41572887c03d3fefc7a47884d3282150817b6d9" Jan 29 12:31:42 crc kubenswrapper[4753]: E0129 12:31:42.230866 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bdc2ce23aa0b4c724f557767c41572887c03d3fefc7a47884d3282150817b6d9\": container with ID starting with bdc2ce23aa0b4c724f557767c41572887c03d3fefc7a47884d3282150817b6d9 not found: ID does not exist" containerID="bdc2ce23aa0b4c724f557767c41572887c03d3fefc7a47884d3282150817b6d9" Jan 29 12:31:42 crc kubenswrapper[4753]: I0129 12:31:42.230893 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bdc2ce23aa0b4c724f557767c41572887c03d3fefc7a47884d3282150817b6d9"} err="failed to get container status \"bdc2ce23aa0b4c724f557767c41572887c03d3fefc7a47884d3282150817b6d9\": rpc error: code = NotFound desc = could not find container \"bdc2ce23aa0b4c724f557767c41572887c03d3fefc7a47884d3282150817b6d9\": container with ID starting with bdc2ce23aa0b4c724f557767c41572887c03d3fefc7a47884d3282150817b6d9 not found: ID does not exist" Jan 29 12:31:43 crc kubenswrapper[4753]: I0129 12:31:43.899701 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cec6584a-fcc6-4b1d-8516-c2e6b2194048" path="/var/lib/kubelet/pods/cec6584a-fcc6-4b1d-8516-c2e6b2194048/volumes" Jan 29 12:31:59 crc kubenswrapper[4753]: I0129 12:31:59.253085 4753 patch_prober.go:28] interesting pod/machine-config-daemon-7c24x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 12:31:59 crc kubenswrapper[4753]: I0129 12:31:59.253831 4753 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.061508 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-storage-1" Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.129880 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-storage-2" Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.134977 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.248543 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tg5qb\" (UniqueName: \"kubernetes.io/projected/5f186cee-89fe-4c25-b227-628ffcb2a98f-kube-api-access-tg5qb\") pod \"5f186cee-89fe-4c25-b227-628ffcb2a98f\" (UID: \"5f186cee-89fe-4c25-b227-628ffcb2a98f\") " Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.248621 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lrf7t\" (UniqueName: \"kubernetes.io/projected/cdf821d7-7cec-4c33-a59e-1c6493fd7281-kube-api-access-lrf7t\") pod \"cdf821d7-7cec-4c33-a59e-1c6493fd7281\" (UID: \"cdf821d7-7cec-4c33-a59e-1c6493fd7281\") " Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.248680 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1-cache\") pod \"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1\" (UID: \"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1\") " Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.248712 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/5f186cee-89fe-4c25-b227-628ffcb2a98f-lock\") pod \"5f186cee-89fe-4c25-b227-628ffcb2a98f\" (UID: \"5f186cee-89fe-4c25-b227-628ffcb2a98f\") " Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.248740 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swift\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1\" (UID: \"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1\") " Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.248805 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1-etc-swift\") pod \"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1\" (UID: \"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1\") " Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.248859 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/cdf821d7-7cec-4c33-a59e-1c6493fd7281-cache\") pod \"cdf821d7-7cec-4c33-a59e-1c6493fd7281\" (UID: \"cdf821d7-7cec-4c33-a59e-1c6493fd7281\") " Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.248908 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/5f186cee-89fe-4c25-b227-628ffcb2a98f-etc-swift\") pod \"5f186cee-89fe-4c25-b227-628ffcb2a98f\" (UID: \"5f186cee-89fe-4c25-b227-628ffcb2a98f\") " Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.248937 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/5f186cee-89fe-4c25-b227-628ffcb2a98f-cache\") pod \"5f186cee-89fe-4c25-b227-628ffcb2a98f\" (UID: \"5f186cee-89fe-4c25-b227-628ffcb2a98f\") " Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.248969 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1-lock\") pod \"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1\" (UID: \"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1\") " Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.249000 4753 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swift\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"cdf821d7-7cec-4c33-a59e-1c6493fd7281\" (UID: \"cdf821d7-7cec-4c33-a59e-1c6493fd7281\") " Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.249086 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f6wsh\" (UniqueName: \"kubernetes.io/projected/fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1-kube-api-access-f6wsh\") pod \"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1\" (UID: \"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1\") " Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.249124 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/cdf821d7-7cec-4c33-a59e-1c6493fd7281-etc-swift\") pod \"cdf821d7-7cec-4c33-a59e-1c6493fd7281\" (UID: \"cdf821d7-7cec-4c33-a59e-1c6493fd7281\") " Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.249153 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swift\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"5f186cee-89fe-4c25-b227-628ffcb2a98f\" (UID: \"5f186cee-89fe-4c25-b227-628ffcb2a98f\") " Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.249183 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/cdf821d7-7cec-4c33-a59e-1c6493fd7281-lock\") pod \"cdf821d7-7cec-4c33-a59e-1c6493fd7281\" (UID: \"cdf821d7-7cec-4c33-a59e-1c6493fd7281\") " Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.250499 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1-lock" (OuterVolumeSpecName: "lock") pod "fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" (UID: "fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1"). InnerVolumeSpecName "lock". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.251048 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5f186cee-89fe-4c25-b227-628ffcb2a98f-cache" (OuterVolumeSpecName: "cache") pod "5f186cee-89fe-4c25-b227-628ffcb2a98f" (UID: "5f186cee-89fe-4c25-b227-628ffcb2a98f"). InnerVolumeSpecName "cache". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.251566 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1-cache" (OuterVolumeSpecName: "cache") pod "fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" (UID: "fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1"). InnerVolumeSpecName "cache". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.251588 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5f186cee-89fe-4c25-b227-628ffcb2a98f-lock" (OuterVolumeSpecName: "lock") pod "5f186cee-89fe-4c25-b227-628ffcb2a98f" (UID: "5f186cee-89fe-4c25-b227-628ffcb2a98f"). InnerVolumeSpecName "lock". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.251938 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cdf821d7-7cec-4c33-a59e-1c6493fd7281-cache" (OuterVolumeSpecName: "cache") pod "cdf821d7-7cec-4c33-a59e-1c6493fd7281" (UID: "cdf821d7-7cec-4c33-a59e-1c6493fd7281"). InnerVolumeSpecName "cache". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.252078 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cdf821d7-7cec-4c33-a59e-1c6493fd7281-lock" (OuterVolumeSpecName: "lock") pod "cdf821d7-7cec-4c33-a59e-1c6493fd7281" (UID: "cdf821d7-7cec-4c33-a59e-1c6493fd7281"). InnerVolumeSpecName "lock". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.256199 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1-kube-api-access-f6wsh" (OuterVolumeSpecName: "kube-api-access-f6wsh") pod "fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" (UID: "fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1"). InnerVolumeSpecName "kube-api-access-f6wsh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.256362 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage08-crc" (OuterVolumeSpecName: "swift") pod "cdf821d7-7cec-4c33-a59e-1c6493fd7281" (UID: "cdf821d7-7cec-4c33-a59e-1c6493fd7281"). InnerVolumeSpecName "local-storage08-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.256505 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "swift") pod "fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" (UID: "fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1"). InnerVolumeSpecName "local-storage02-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.256849 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cdf821d7-7cec-4c33-a59e-1c6493fd7281-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "cdf821d7-7cec-4c33-a59e-1c6493fd7281" (UID: "cdf821d7-7cec-4c33-a59e-1c6493fd7281"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.257325 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5f186cee-89fe-4c25-b227-628ffcb2a98f-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "5f186cee-89fe-4c25-b227-628ffcb2a98f" (UID: "5f186cee-89fe-4c25-b227-628ffcb2a98f"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.257708 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" (UID: "fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1"). InnerVolumeSpecName "etc-swift". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.260699 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage09-crc" (OuterVolumeSpecName: "swift") pod "5f186cee-89fe-4c25-b227-628ffcb2a98f" (UID: "5f186cee-89fe-4c25-b227-628ffcb2a98f"). InnerVolumeSpecName "local-storage09-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.260700 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5f186cee-89fe-4c25-b227-628ffcb2a98f-kube-api-access-tg5qb" (OuterVolumeSpecName: "kube-api-access-tg5qb") pod "5f186cee-89fe-4c25-b227-628ffcb2a98f" (UID: "5f186cee-89fe-4c25-b227-628ffcb2a98f"). InnerVolumeSpecName "kube-api-access-tg5qb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.260734 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cdf821d7-7cec-4c33-a59e-1c6493fd7281-kube-api-access-lrf7t" (OuterVolumeSpecName: "kube-api-access-lrf7t") pod "cdf821d7-7cec-4c33-a59e-1c6493fd7281" (UID: "cdf821d7-7cec-4c33-a59e-1c6493fd7281"). InnerVolumeSpecName "kube-api-access-lrf7t". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.351026 4753 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/5f186cee-89fe-4c25-b227-628ffcb2a98f-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.351075 4753 reconciler_common.go:293] "Volume detached for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/5f186cee-89fe-4c25-b227-628ffcb2a98f-cache\") on node \"crc\" DevicePath \"\"" Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.351098 4753 reconciler_common.go:293] "Volume detached for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1-lock\") on node \"crc\" DevicePath \"\"" Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.351149 4753 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" " Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.351169 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f6wsh\" (UniqueName: \"kubernetes.io/projected/fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1-kube-api-access-f6wsh\") on node \"crc\" DevicePath \"\"" Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.351184 4753 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/cdf821d7-7cec-4c33-a59e-1c6493fd7281-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.351201 4753 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" " Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.351212 4753 reconciler_common.go:293] "Volume detached for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/cdf821d7-7cec-4c33-a59e-1c6493fd7281-lock\") on node \"crc\" DevicePath \"\"" Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.351244 4753 reconciler_common.go:293] "Volume detached 
for volume \"kube-api-access-tg5qb\" (UniqueName: \"kubernetes.io/projected/5f186cee-89fe-4c25-b227-628ffcb2a98f-kube-api-access-tg5qb\") on node \"crc\" DevicePath \"\"" Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.351258 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lrf7t\" (UniqueName: \"kubernetes.io/projected/cdf821d7-7cec-4c33-a59e-1c6493fd7281-kube-api-access-lrf7t\") on node \"crc\" DevicePath \"\"" Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.351271 4753 reconciler_common.go:293] "Volume detached for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1-cache\") on node \"crc\" DevicePath \"\"" Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.351291 4753 reconciler_common.go:293] "Volume detached for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/5f186cee-89fe-4c25-b227-628ffcb2a98f-lock\") on node \"crc\" DevicePath \"\"" Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.351309 4753 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" " Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.351320 4753 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.351330 4753 reconciler_common.go:293] "Volume detached for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/cdf821d7-7cec-4c33-a59e-1c6493fd7281-cache\") on node \"crc\" DevicePath \"\"" Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.364187 4753 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage08-crc" (UniqueName: "kubernetes.io/local-volume/local-storage08-crc") on node "crc" Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.366061 4753 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc" Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.367539 4753 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage09-crc" (UniqueName: "kubernetes.io/local-volume/local-storage09-crc") on node "crc" Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.452641 4753 reconciler_common.go:293] "Volume detached for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" DevicePath \"\"" Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.452701 4753 reconciler_common.go:293] "Volume detached for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" DevicePath \"\"" Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.452721 4753 reconciler_common.go:293] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\"" Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.529955 4753 generic.go:334] "Generic (PLEG): container finished" podID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerID="0a2445c3569e08520140f26ebf5205ae7e804cd6709b85f2a1fb89ce86ea5674" exitCode=137 Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.530076 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" 
event={"ID":"5f186cee-89fe-4c25-b227-628ffcb2a98f","Type":"ContainerDied","Data":"0a2445c3569e08520140f26ebf5205ae7e804cd6709b85f2a1fb89ce86ea5674"} Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.530127 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"5f186cee-89fe-4c25-b227-628ffcb2a98f","Type":"ContainerDied","Data":"c07f9789194850b328b4afa31d64c118db797aea81cbec08e7d72941358f62f3"} Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.530211 4753 scope.go:117] "RemoveContainer" containerID="0a2445c3569e08520140f26ebf5205ae7e804cd6709b85f2a1fb89ce86ea5674" Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.530492 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-storage-2" Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.540945 4753 generic.go:334] "Generic (PLEG): container finished" podID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerID="b8020c59d568fbfdd9bf91a35ad05830bd19b500dc075b3d4bc3eda195246fab" exitCode=137 Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.541336 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.541122 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1","Type":"ContainerDied","Data":"b8020c59d568fbfdd9bf91a35ad05830bd19b500dc075b3d4bc3eda195246fab"} Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.541410 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1","Type":"ContainerDied","Data":"4f15ddd8fa257ee30591ad3a5ca43ecefbf5d5a4f7894aff28107a76d04f08d1"} Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.541446 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"fea50e28f0174fd91362c9abb3b729cc30c5f58c114ab482b2127002cf3c630b"} Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.541462 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"21a68569c06ad75010210c795bc28381cfc8966ffbce36fdbe1e636ab5477089"} Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.541468 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"863f0dd428950d22ff5bb1d391e9d5b81ac1a1cbbe4e695d9602a641d07a1a22"} Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.541472 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b75616dd1d2571d9087cbb56f8d01e12a0d244229467ec074a4010fb12da21f3"} Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.541478 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"0f829cdb3721a769b92e78700efd5425645fe4eed5ead53148401905c22bb46d"} Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.541484 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1661471db32399586faa01821a0cd7e36b5025a57e9a8d4f08b6a0cdc8302d89"} Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.541489 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" 
containerID={"Type":"cri-o","ID":"4bf0d941158a40c1b8708f56a8bbbbd8f371ac2b901f4843bcdc628feb29b830"} Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.541495 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"26ced7e34171d6bfce760564034338cc137c0fec95e21a1eda4b638ce903778b"} Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.541499 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ebbaa821837a219076c7e5eaecb2900a7f98b03a800d03b39a05eb1bc69709ae"} Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.552270 4753 generic.go:334] "Generic (PLEG): container finished" podID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerID="194bd65bd1293b044532e09dbd55bdc880c52603b4ee58046654673e07e25e17" exitCode=137 Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.552313 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"cdf821d7-7cec-4c33-a59e-1c6493fd7281","Type":"ContainerDied","Data":"194bd65bd1293b044532e09dbd55bdc880c52603b4ee58046654673e07e25e17"} Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.552336 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"194bd65bd1293b044532e09dbd55bdc880c52603b4ee58046654673e07e25e17"} Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.552350 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e93a1c2eebca268c75cadde6e248adb908e05fb31946984b83eff825afbd1777"} Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.552356 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e0df4d24defb12bbb6148384cb20d00499a3423ed6132f66831d8d0399b66e27"} Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.552362 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"16c189b240c3b6af0b5143c94f27ad8f278cf55c71fde6ff59b1123203d274ec"} Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.552367 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d29a9d10061279a2805ca6403127e650b1bbfe36a4163df4e079fae54c5a5970"} Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.552372 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4f9f3bd4b5dcda78683a4fd9f6169c0ba4cacc4f6a05deb5fb5b67519e1959ef"} Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.552377 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7375ec7e5486a51f1304b60d7e06caa5a53e836b083d9d627059439991053ffd"} Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.552385 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"0aec20004c67a50d19a5c8d1cf7ed9018b4b95c39a5978455971cdd8179addd1"} Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.552391 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3d01ab550711d3f6ed3858797bfec7a7069bc75766d3c545e26de6dea09f5fb8"} Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.552395 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" 
containerID={"Type":"cri-o","ID":"512e69e3786e84e63a587d8d9745a6408e329502de45c30a1be8a2813fce175e"} Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.552400 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"49988160277c5cc45dd8900eb16d03199ec0c4ffa40e5904a5dbb7b9d2078f17"} Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.552405 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6d62737aaefa0ae329ab69794e4a09fdbf4a1956c0ade81f0a19a89a0f563158"} Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.552410 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b028c59c7810639143318f825119fe54eacc1a661cb573dc2455ed7e2ba850ed"} Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.552415 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"54791b5e4f8e371f9007a17143cbc5838793bd8d3e1ce65c2822161e12d6146c"} Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.552420 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f0eddfb27e0b1ca03ff86905f0d1b81d674be9160c615a4f016f519d650869e7"} Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.552432 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"cdf821d7-7cec-4c33-a59e-1c6493fd7281","Type":"ContainerDied","Data":"98865a6d1c05bb7d82e89d85309da0a159d575dfadab0935076448e89d09eb7c"} Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.552440 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"194bd65bd1293b044532e09dbd55bdc880c52603b4ee58046654673e07e25e17"} Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.552447 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e93a1c2eebca268c75cadde6e248adb908e05fb31946984b83eff825afbd1777"} Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.552452 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e0df4d24defb12bbb6148384cb20d00499a3423ed6132f66831d8d0399b66e27"} Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.552457 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"16c189b240c3b6af0b5143c94f27ad8f278cf55c71fde6ff59b1123203d274ec"} Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.552462 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d29a9d10061279a2805ca6403127e650b1bbfe36a4163df4e079fae54c5a5970"} Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.552467 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4f9f3bd4b5dcda78683a4fd9f6169c0ba4cacc4f6a05deb5fb5b67519e1959ef"} Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.552472 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7375ec7e5486a51f1304b60d7e06caa5a53e836b083d9d627059439991053ffd"} Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.552476 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" 
containerID={"Type":"cri-o","ID":"0aec20004c67a50d19a5c8d1cf7ed9018b4b95c39a5978455971cdd8179addd1"} Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.552482 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3d01ab550711d3f6ed3858797bfec7a7069bc75766d3c545e26de6dea09f5fb8"} Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.552487 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"512e69e3786e84e63a587d8d9745a6408e329502de45c30a1be8a2813fce175e"} Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.552492 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"49988160277c5cc45dd8900eb16d03199ec0c4ffa40e5904a5dbb7b9d2078f17"} Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.552497 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6d62737aaefa0ae329ab69794e4a09fdbf4a1956c0ade81f0a19a89a0f563158"} Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.552502 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b028c59c7810639143318f825119fe54eacc1a661cb573dc2455ed7e2ba850ed"} Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.552507 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"54791b5e4f8e371f9007a17143cbc5838793bd8d3e1ce65c2822161e12d6146c"} Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.552512 4753 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f0eddfb27e0b1ca03ff86905f0d1b81d674be9160c615a4f016f519d650869e7"} Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.552599 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/swift-storage-1" Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.574862 4753 scope.go:117] "RemoveContainer" containerID="cdf458faa8e41f1db5e8106107119c792aea631dcda870cad66d95e8a74440aa" Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.583902 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/swift-storage-2"] Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.601369 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/swift-storage-2"] Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.631309 4753 scope.go:117] "RemoveContainer" containerID="50d6ac3c69c7caed60fbf1e5bb69f1a917f89479ed3e932b76121dae17629596" Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.639424 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/swift-storage-0"] Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.810182 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/swift-storage-0"] Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.815068 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/swift-storage-1"] Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.820458 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/swift-storage-1"] Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.840612 4753 scope.go:117] "RemoveContainer" containerID="afe9540d6c8487275d7527ca39a1a61852a5605ffc987d75def8883cdd9c9d91" Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.883530 4753 scope.go:117] "RemoveContainer" containerID="c59dfabc0782c5e45570ebd81541d4892c4494e9a05b790d9d0166ca98e35355" Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.896827 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" path="/var/lib/kubelet/pods/5f186cee-89fe-4c25-b227-628ffcb2a98f/volumes" Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.899303 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" path="/var/lib/kubelet/pods/cdf821d7-7cec-4c33-a59e-1c6493fd7281/volumes" Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.901818 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" path="/var/lib/kubelet/pods/fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1/volumes" Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.909912 4753 scope.go:117] "RemoveContainer" containerID="b1ec4dfcf68549610147c8cd4d5995a51e66768570c5c1fdb817858b572a2aef" Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.928923 4753 scope.go:117] "RemoveContainer" containerID="824a9ea0f9ab12597c31967bb66834f5b783df0905b01089fcbd3905b858c483" Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.944372 4753 scope.go:117] "RemoveContainer" containerID="d112d59361aa4821a24ce857e58ec6ead1b63c9892c956f350e18f52f9e10e65" Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.961830 4753 scope.go:117] "RemoveContainer" containerID="3225ffbf14a2f46af26051f69cf072841186d28c18bf03982b6718349f799de8" Jan 29 12:32:11 crc kubenswrapper[4753]: I0129 12:32:11.979801 4753 scope.go:117] "RemoveContainer" containerID="3705c5801c3eba8ddc1d2890a6acdfcdf4c62e5da07c136d125988be1e360884" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.007050 4753 scope.go:117] "RemoveContainer" 
containerID="9f7afb748e128f506597d0e087b167fcba9224b8b7016f8ee496a4591b0e162c" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.025864 4753 scope.go:117] "RemoveContainer" containerID="31d6993374213dd3b7e564d0a4085cc39908df3ddaf6e54182f707a289ddab96" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.043030 4753 scope.go:117] "RemoveContainer" containerID="056e27f46aca1e39fcb6267b02e6b8748de9f6836da7bd55a0b5148163a59cdf" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.063313 4753 scope.go:117] "RemoveContainer" containerID="8ade60df808d454198934fc448217bcca19a3633d29a12c5a706cb873f806811" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.078778 4753 scope.go:117] "RemoveContainer" containerID="1737e0ba3b3d084614c0a7a06cc57cad8d732ad8ed13e0c65e0965dfefef1d63" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.103014 4753 scope.go:117] "RemoveContainer" containerID="0a2445c3569e08520140f26ebf5205ae7e804cd6709b85f2a1fb89ce86ea5674" Jan 29 12:32:12 crc kubenswrapper[4753]: E0129 12:32:12.105390 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0a2445c3569e08520140f26ebf5205ae7e804cd6709b85f2a1fb89ce86ea5674\": container with ID starting with 0a2445c3569e08520140f26ebf5205ae7e804cd6709b85f2a1fb89ce86ea5674 not found: ID does not exist" containerID="0a2445c3569e08520140f26ebf5205ae7e804cd6709b85f2a1fb89ce86ea5674" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.105567 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0a2445c3569e08520140f26ebf5205ae7e804cd6709b85f2a1fb89ce86ea5674"} err="failed to get container status \"0a2445c3569e08520140f26ebf5205ae7e804cd6709b85f2a1fb89ce86ea5674\": rpc error: code = NotFound desc = could not find container \"0a2445c3569e08520140f26ebf5205ae7e804cd6709b85f2a1fb89ce86ea5674\": container with ID starting with 0a2445c3569e08520140f26ebf5205ae7e804cd6709b85f2a1fb89ce86ea5674 not found: ID does not exist" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.105807 4753 scope.go:117] "RemoveContainer" containerID="cdf458faa8e41f1db5e8106107119c792aea631dcda870cad66d95e8a74440aa" Jan 29 12:32:12 crc kubenswrapper[4753]: E0129 12:32:12.106371 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cdf458faa8e41f1db5e8106107119c792aea631dcda870cad66d95e8a74440aa\": container with ID starting with cdf458faa8e41f1db5e8106107119c792aea631dcda870cad66d95e8a74440aa not found: ID does not exist" containerID="cdf458faa8e41f1db5e8106107119c792aea631dcda870cad66d95e8a74440aa" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.106470 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cdf458faa8e41f1db5e8106107119c792aea631dcda870cad66d95e8a74440aa"} err="failed to get container status \"cdf458faa8e41f1db5e8106107119c792aea631dcda870cad66d95e8a74440aa\": rpc error: code = NotFound desc = could not find container \"cdf458faa8e41f1db5e8106107119c792aea631dcda870cad66d95e8a74440aa\": container with ID starting with cdf458faa8e41f1db5e8106107119c792aea631dcda870cad66d95e8a74440aa not found: ID does not exist" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.106538 4753 scope.go:117] "RemoveContainer" containerID="50d6ac3c69c7caed60fbf1e5bb69f1a917f89479ed3e932b76121dae17629596" Jan 29 12:32:12 crc kubenswrapper[4753]: E0129 12:32:12.107180 4753 log.go:32] "ContainerStatus from 
runtime service failed" err="rpc error: code = NotFound desc = could not find container \"50d6ac3c69c7caed60fbf1e5bb69f1a917f89479ed3e932b76121dae17629596\": container with ID starting with 50d6ac3c69c7caed60fbf1e5bb69f1a917f89479ed3e932b76121dae17629596 not found: ID does not exist" containerID="50d6ac3c69c7caed60fbf1e5bb69f1a917f89479ed3e932b76121dae17629596" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.107207 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"50d6ac3c69c7caed60fbf1e5bb69f1a917f89479ed3e932b76121dae17629596"} err="failed to get container status \"50d6ac3c69c7caed60fbf1e5bb69f1a917f89479ed3e932b76121dae17629596\": rpc error: code = NotFound desc = could not find container \"50d6ac3c69c7caed60fbf1e5bb69f1a917f89479ed3e932b76121dae17629596\": container with ID starting with 50d6ac3c69c7caed60fbf1e5bb69f1a917f89479ed3e932b76121dae17629596 not found: ID does not exist" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.107241 4753 scope.go:117] "RemoveContainer" containerID="afe9540d6c8487275d7527ca39a1a61852a5605ffc987d75def8883cdd9c9d91" Jan 29 12:32:12 crc kubenswrapper[4753]: E0129 12:32:12.107771 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"afe9540d6c8487275d7527ca39a1a61852a5605ffc987d75def8883cdd9c9d91\": container with ID starting with afe9540d6c8487275d7527ca39a1a61852a5605ffc987d75def8883cdd9c9d91 not found: ID does not exist" containerID="afe9540d6c8487275d7527ca39a1a61852a5605ffc987d75def8883cdd9c9d91" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.107850 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"afe9540d6c8487275d7527ca39a1a61852a5605ffc987d75def8883cdd9c9d91"} err="failed to get container status \"afe9540d6c8487275d7527ca39a1a61852a5605ffc987d75def8883cdd9c9d91\": rpc error: code = NotFound desc = could not find container \"afe9540d6c8487275d7527ca39a1a61852a5605ffc987d75def8883cdd9c9d91\": container with ID starting with afe9540d6c8487275d7527ca39a1a61852a5605ffc987d75def8883cdd9c9d91 not found: ID does not exist" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.107927 4753 scope.go:117] "RemoveContainer" containerID="c59dfabc0782c5e45570ebd81541d4892c4494e9a05b790d9d0166ca98e35355" Jan 29 12:32:12 crc kubenswrapper[4753]: E0129 12:32:12.108304 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c59dfabc0782c5e45570ebd81541d4892c4494e9a05b790d9d0166ca98e35355\": container with ID starting with c59dfabc0782c5e45570ebd81541d4892c4494e9a05b790d9d0166ca98e35355 not found: ID does not exist" containerID="c59dfabc0782c5e45570ebd81541d4892c4494e9a05b790d9d0166ca98e35355" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.108362 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c59dfabc0782c5e45570ebd81541d4892c4494e9a05b790d9d0166ca98e35355"} err="failed to get container status \"c59dfabc0782c5e45570ebd81541d4892c4494e9a05b790d9d0166ca98e35355\": rpc error: code = NotFound desc = could not find container \"c59dfabc0782c5e45570ebd81541d4892c4494e9a05b790d9d0166ca98e35355\": container with ID starting with c59dfabc0782c5e45570ebd81541d4892c4494e9a05b790d9d0166ca98e35355 not found: ID does not exist" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.108381 4753 scope.go:117] "RemoveContainer" 
containerID="b1ec4dfcf68549610147c8cd4d5995a51e66768570c5c1fdb817858b572a2aef" Jan 29 12:32:12 crc kubenswrapper[4753]: E0129 12:32:12.108704 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b1ec4dfcf68549610147c8cd4d5995a51e66768570c5c1fdb817858b572a2aef\": container with ID starting with b1ec4dfcf68549610147c8cd4d5995a51e66768570c5c1fdb817858b572a2aef not found: ID does not exist" containerID="b1ec4dfcf68549610147c8cd4d5995a51e66768570c5c1fdb817858b572a2aef" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.108748 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b1ec4dfcf68549610147c8cd4d5995a51e66768570c5c1fdb817858b572a2aef"} err="failed to get container status \"b1ec4dfcf68549610147c8cd4d5995a51e66768570c5c1fdb817858b572a2aef\": rpc error: code = NotFound desc = could not find container \"b1ec4dfcf68549610147c8cd4d5995a51e66768570c5c1fdb817858b572a2aef\": container with ID starting with b1ec4dfcf68549610147c8cd4d5995a51e66768570c5c1fdb817858b572a2aef not found: ID does not exist" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.108762 4753 scope.go:117] "RemoveContainer" containerID="824a9ea0f9ab12597c31967bb66834f5b783df0905b01089fcbd3905b858c483" Jan 29 12:32:12 crc kubenswrapper[4753]: E0129 12:32:12.108999 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"824a9ea0f9ab12597c31967bb66834f5b783df0905b01089fcbd3905b858c483\": container with ID starting with 824a9ea0f9ab12597c31967bb66834f5b783df0905b01089fcbd3905b858c483 not found: ID does not exist" containerID="824a9ea0f9ab12597c31967bb66834f5b783df0905b01089fcbd3905b858c483" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.109016 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"824a9ea0f9ab12597c31967bb66834f5b783df0905b01089fcbd3905b858c483"} err="failed to get container status \"824a9ea0f9ab12597c31967bb66834f5b783df0905b01089fcbd3905b858c483\": rpc error: code = NotFound desc = could not find container \"824a9ea0f9ab12597c31967bb66834f5b783df0905b01089fcbd3905b858c483\": container with ID starting with 824a9ea0f9ab12597c31967bb66834f5b783df0905b01089fcbd3905b858c483 not found: ID does not exist" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.109028 4753 scope.go:117] "RemoveContainer" containerID="d112d59361aa4821a24ce857e58ec6ead1b63c9892c956f350e18f52f9e10e65" Jan 29 12:32:12 crc kubenswrapper[4753]: E0129 12:32:12.109532 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d112d59361aa4821a24ce857e58ec6ead1b63c9892c956f350e18f52f9e10e65\": container with ID starting with d112d59361aa4821a24ce857e58ec6ead1b63c9892c956f350e18f52f9e10e65 not found: ID does not exist" containerID="d112d59361aa4821a24ce857e58ec6ead1b63c9892c956f350e18f52f9e10e65" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.109632 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d112d59361aa4821a24ce857e58ec6ead1b63c9892c956f350e18f52f9e10e65"} err="failed to get container status \"d112d59361aa4821a24ce857e58ec6ead1b63c9892c956f350e18f52f9e10e65\": rpc error: code = NotFound desc = could not find container \"d112d59361aa4821a24ce857e58ec6ead1b63c9892c956f350e18f52f9e10e65\": container with ID starting with 
d112d59361aa4821a24ce857e58ec6ead1b63c9892c956f350e18f52f9e10e65 not found: ID does not exist" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.109734 4753 scope.go:117] "RemoveContainer" containerID="3225ffbf14a2f46af26051f69cf072841186d28c18bf03982b6718349f799de8" Jan 29 12:32:12 crc kubenswrapper[4753]: E0129 12:32:12.110269 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3225ffbf14a2f46af26051f69cf072841186d28c18bf03982b6718349f799de8\": container with ID starting with 3225ffbf14a2f46af26051f69cf072841186d28c18bf03982b6718349f799de8 not found: ID does not exist" containerID="3225ffbf14a2f46af26051f69cf072841186d28c18bf03982b6718349f799de8" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.110312 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3225ffbf14a2f46af26051f69cf072841186d28c18bf03982b6718349f799de8"} err="failed to get container status \"3225ffbf14a2f46af26051f69cf072841186d28c18bf03982b6718349f799de8\": rpc error: code = NotFound desc = could not find container \"3225ffbf14a2f46af26051f69cf072841186d28c18bf03982b6718349f799de8\": container with ID starting with 3225ffbf14a2f46af26051f69cf072841186d28c18bf03982b6718349f799de8 not found: ID does not exist" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.110328 4753 scope.go:117] "RemoveContainer" containerID="3705c5801c3eba8ddc1d2890a6acdfcdf4c62e5da07c136d125988be1e360884" Jan 29 12:32:12 crc kubenswrapper[4753]: E0129 12:32:12.110561 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3705c5801c3eba8ddc1d2890a6acdfcdf4c62e5da07c136d125988be1e360884\": container with ID starting with 3705c5801c3eba8ddc1d2890a6acdfcdf4c62e5da07c136d125988be1e360884 not found: ID does not exist" containerID="3705c5801c3eba8ddc1d2890a6acdfcdf4c62e5da07c136d125988be1e360884" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.110582 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3705c5801c3eba8ddc1d2890a6acdfcdf4c62e5da07c136d125988be1e360884"} err="failed to get container status \"3705c5801c3eba8ddc1d2890a6acdfcdf4c62e5da07c136d125988be1e360884\": rpc error: code = NotFound desc = could not find container \"3705c5801c3eba8ddc1d2890a6acdfcdf4c62e5da07c136d125988be1e360884\": container with ID starting with 3705c5801c3eba8ddc1d2890a6acdfcdf4c62e5da07c136d125988be1e360884 not found: ID does not exist" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.110596 4753 scope.go:117] "RemoveContainer" containerID="9f7afb748e128f506597d0e087b167fcba9224b8b7016f8ee496a4591b0e162c" Jan 29 12:32:12 crc kubenswrapper[4753]: E0129 12:32:12.110817 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9f7afb748e128f506597d0e087b167fcba9224b8b7016f8ee496a4591b0e162c\": container with ID starting with 9f7afb748e128f506597d0e087b167fcba9224b8b7016f8ee496a4591b0e162c not found: ID does not exist" containerID="9f7afb748e128f506597d0e087b167fcba9224b8b7016f8ee496a4591b0e162c" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.110836 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9f7afb748e128f506597d0e087b167fcba9224b8b7016f8ee496a4591b0e162c"} err="failed to get container status \"9f7afb748e128f506597d0e087b167fcba9224b8b7016f8ee496a4591b0e162c\": rpc 
error: code = NotFound desc = could not find container \"9f7afb748e128f506597d0e087b167fcba9224b8b7016f8ee496a4591b0e162c\": container with ID starting with 9f7afb748e128f506597d0e087b167fcba9224b8b7016f8ee496a4591b0e162c not found: ID does not exist" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.110870 4753 scope.go:117] "RemoveContainer" containerID="31d6993374213dd3b7e564d0a4085cc39908df3ddaf6e54182f707a289ddab96" Jan 29 12:32:12 crc kubenswrapper[4753]: E0129 12:32:12.111113 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"31d6993374213dd3b7e564d0a4085cc39908df3ddaf6e54182f707a289ddab96\": container with ID starting with 31d6993374213dd3b7e564d0a4085cc39908df3ddaf6e54182f707a289ddab96 not found: ID does not exist" containerID="31d6993374213dd3b7e564d0a4085cc39908df3ddaf6e54182f707a289ddab96" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.111367 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"31d6993374213dd3b7e564d0a4085cc39908df3ddaf6e54182f707a289ddab96"} err="failed to get container status \"31d6993374213dd3b7e564d0a4085cc39908df3ddaf6e54182f707a289ddab96\": rpc error: code = NotFound desc = could not find container \"31d6993374213dd3b7e564d0a4085cc39908df3ddaf6e54182f707a289ddab96\": container with ID starting with 31d6993374213dd3b7e564d0a4085cc39908df3ddaf6e54182f707a289ddab96 not found: ID does not exist" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.111402 4753 scope.go:117] "RemoveContainer" containerID="056e27f46aca1e39fcb6267b02e6b8748de9f6836da7bd55a0b5148163a59cdf" Jan 29 12:32:12 crc kubenswrapper[4753]: E0129 12:32:12.111656 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"056e27f46aca1e39fcb6267b02e6b8748de9f6836da7bd55a0b5148163a59cdf\": container with ID starting with 056e27f46aca1e39fcb6267b02e6b8748de9f6836da7bd55a0b5148163a59cdf not found: ID does not exist" containerID="056e27f46aca1e39fcb6267b02e6b8748de9f6836da7bd55a0b5148163a59cdf" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.111833 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"056e27f46aca1e39fcb6267b02e6b8748de9f6836da7bd55a0b5148163a59cdf"} err="failed to get container status \"056e27f46aca1e39fcb6267b02e6b8748de9f6836da7bd55a0b5148163a59cdf\": rpc error: code = NotFound desc = could not find container \"056e27f46aca1e39fcb6267b02e6b8748de9f6836da7bd55a0b5148163a59cdf\": container with ID starting with 056e27f46aca1e39fcb6267b02e6b8748de9f6836da7bd55a0b5148163a59cdf not found: ID does not exist" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.111909 4753 scope.go:117] "RemoveContainer" containerID="8ade60df808d454198934fc448217bcca19a3633d29a12c5a706cb873f806811" Jan 29 12:32:12 crc kubenswrapper[4753]: E0129 12:32:12.112175 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8ade60df808d454198934fc448217bcca19a3633d29a12c5a706cb873f806811\": container with ID starting with 8ade60df808d454198934fc448217bcca19a3633d29a12c5a706cb873f806811 not found: ID does not exist" containerID="8ade60df808d454198934fc448217bcca19a3633d29a12c5a706cb873f806811" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.112202 4753 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"8ade60df808d454198934fc448217bcca19a3633d29a12c5a706cb873f806811"} err="failed to get container status \"8ade60df808d454198934fc448217bcca19a3633d29a12c5a706cb873f806811\": rpc error: code = NotFound desc = could not find container \"8ade60df808d454198934fc448217bcca19a3633d29a12c5a706cb873f806811\": container with ID starting with 8ade60df808d454198934fc448217bcca19a3633d29a12c5a706cb873f806811 not found: ID does not exist" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.112220 4753 scope.go:117] "RemoveContainer" containerID="1737e0ba3b3d084614c0a7a06cc57cad8d732ad8ed13e0c65e0965dfefef1d63" Jan 29 12:32:12 crc kubenswrapper[4753]: E0129 12:32:12.113531 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1737e0ba3b3d084614c0a7a06cc57cad8d732ad8ed13e0c65e0965dfefef1d63\": container with ID starting with 1737e0ba3b3d084614c0a7a06cc57cad8d732ad8ed13e0c65e0965dfefef1d63 not found: ID does not exist" containerID="1737e0ba3b3d084614c0a7a06cc57cad8d732ad8ed13e0c65e0965dfefef1d63" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.113562 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1737e0ba3b3d084614c0a7a06cc57cad8d732ad8ed13e0c65e0965dfefef1d63"} err="failed to get container status \"1737e0ba3b3d084614c0a7a06cc57cad8d732ad8ed13e0c65e0965dfefef1d63\": rpc error: code = NotFound desc = could not find container \"1737e0ba3b3d084614c0a7a06cc57cad8d732ad8ed13e0c65e0965dfefef1d63\": container with ID starting with 1737e0ba3b3d084614c0a7a06cc57cad8d732ad8ed13e0c65e0965dfefef1d63 not found: ID does not exist" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.113580 4753 scope.go:117] "RemoveContainer" containerID="b8020c59d568fbfdd9bf91a35ad05830bd19b500dc075b3d4bc3eda195246fab" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.129582 4753 scope.go:117] "RemoveContainer" containerID="2aa766e3f776a05fb581793f3b320cd526d9395e8e328f981b1cb5007225f32f" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.155881 4753 scope.go:117] "RemoveContainer" containerID="ae14f26fca5ecff409fc265c6b8752e73c7ecc71d20ff281e328fa1fc8e808fd" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.174505 4753 scope.go:117] "RemoveContainer" containerID="da95de997017674f5f725d2434c76b898c8454d351d2ae0c048009fecac2a70b" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.189990 4753 scope.go:117] "RemoveContainer" containerID="e1480ad953e1453491bfbacc844a038d09fdd5a675fd4ec79be9f3f750ee72ae" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.205671 4753 scope.go:117] "RemoveContainer" containerID="f2e02b2ab58bd62d93b270812546264b3e9210ccac10d6b3ed2c1080f1dba78d" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.225557 4753 scope.go:117] "RemoveContainer" containerID="fea50e28f0174fd91362c9abb3b729cc30c5f58c114ab482b2127002cf3c630b" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.247812 4753 scope.go:117] "RemoveContainer" containerID="21a68569c06ad75010210c795bc28381cfc8966ffbce36fdbe1e636ab5477089" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.264166 4753 scope.go:117] "RemoveContainer" containerID="863f0dd428950d22ff5bb1d391e9d5b81ac1a1cbbe4e695d9602a641d07a1a22" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.278437 4753 scope.go:117] "RemoveContainer" containerID="b75616dd1d2571d9087cbb56f8d01e12a0d244229467ec074a4010fb12da21f3" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.296274 
4753 scope.go:117] "RemoveContainer" containerID="0f829cdb3721a769b92e78700efd5425645fe4eed5ead53148401905c22bb46d" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.461030 4753 scope.go:117] "RemoveContainer" containerID="1661471db32399586faa01821a0cd7e36b5025a57e9a8d4f08b6a0cdc8302d89" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.480456 4753 scope.go:117] "RemoveContainer" containerID="4bf0d941158a40c1b8708f56a8bbbbd8f371ac2b901f4843bcdc628feb29b830" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.511357 4753 scope.go:117] "RemoveContainer" containerID="26ced7e34171d6bfce760564034338cc137c0fec95e21a1eda4b638ce903778b" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.534778 4753 scope.go:117] "RemoveContainer" containerID="ebbaa821837a219076c7e5eaecb2900a7f98b03a800d03b39a05eb1bc69709ae" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.556691 4753 scope.go:117] "RemoveContainer" containerID="b8020c59d568fbfdd9bf91a35ad05830bd19b500dc075b3d4bc3eda195246fab" Jan 29 12:32:12 crc kubenswrapper[4753]: E0129 12:32:12.557196 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b8020c59d568fbfdd9bf91a35ad05830bd19b500dc075b3d4bc3eda195246fab\": container with ID starting with b8020c59d568fbfdd9bf91a35ad05830bd19b500dc075b3d4bc3eda195246fab not found: ID does not exist" containerID="b8020c59d568fbfdd9bf91a35ad05830bd19b500dc075b3d4bc3eda195246fab" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.557249 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b8020c59d568fbfdd9bf91a35ad05830bd19b500dc075b3d4bc3eda195246fab"} err="failed to get container status \"b8020c59d568fbfdd9bf91a35ad05830bd19b500dc075b3d4bc3eda195246fab\": rpc error: code = NotFound desc = could not find container \"b8020c59d568fbfdd9bf91a35ad05830bd19b500dc075b3d4bc3eda195246fab\": container with ID starting with b8020c59d568fbfdd9bf91a35ad05830bd19b500dc075b3d4bc3eda195246fab not found: ID does not exist" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.557274 4753 scope.go:117] "RemoveContainer" containerID="2aa766e3f776a05fb581793f3b320cd526d9395e8e328f981b1cb5007225f32f" Jan 29 12:32:12 crc kubenswrapper[4753]: E0129 12:32:12.557712 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2aa766e3f776a05fb581793f3b320cd526d9395e8e328f981b1cb5007225f32f\": container with ID starting with 2aa766e3f776a05fb581793f3b320cd526d9395e8e328f981b1cb5007225f32f not found: ID does not exist" containerID="2aa766e3f776a05fb581793f3b320cd526d9395e8e328f981b1cb5007225f32f" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.557731 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2aa766e3f776a05fb581793f3b320cd526d9395e8e328f981b1cb5007225f32f"} err="failed to get container status \"2aa766e3f776a05fb581793f3b320cd526d9395e8e328f981b1cb5007225f32f\": rpc error: code = NotFound desc = could not find container \"2aa766e3f776a05fb581793f3b320cd526d9395e8e328f981b1cb5007225f32f\": container with ID starting with 2aa766e3f776a05fb581793f3b320cd526d9395e8e328f981b1cb5007225f32f not found: ID does not exist" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.557751 4753 scope.go:117] "RemoveContainer" containerID="ae14f26fca5ecff409fc265c6b8752e73c7ecc71d20ff281e328fa1fc8e808fd" Jan 29 12:32:12 crc kubenswrapper[4753]: E0129 12:32:12.558018 4753 
log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ae14f26fca5ecff409fc265c6b8752e73c7ecc71d20ff281e328fa1fc8e808fd\": container with ID starting with ae14f26fca5ecff409fc265c6b8752e73c7ecc71d20ff281e328fa1fc8e808fd not found: ID does not exist" containerID="ae14f26fca5ecff409fc265c6b8752e73c7ecc71d20ff281e328fa1fc8e808fd" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.558037 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ae14f26fca5ecff409fc265c6b8752e73c7ecc71d20ff281e328fa1fc8e808fd"} err="failed to get container status \"ae14f26fca5ecff409fc265c6b8752e73c7ecc71d20ff281e328fa1fc8e808fd\": rpc error: code = NotFound desc = could not find container \"ae14f26fca5ecff409fc265c6b8752e73c7ecc71d20ff281e328fa1fc8e808fd\": container with ID starting with ae14f26fca5ecff409fc265c6b8752e73c7ecc71d20ff281e328fa1fc8e808fd not found: ID does not exist" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.558049 4753 scope.go:117] "RemoveContainer" containerID="da95de997017674f5f725d2434c76b898c8454d351d2ae0c048009fecac2a70b" Jan 29 12:32:12 crc kubenswrapper[4753]: E0129 12:32:12.558326 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"da95de997017674f5f725d2434c76b898c8454d351d2ae0c048009fecac2a70b\": container with ID starting with da95de997017674f5f725d2434c76b898c8454d351d2ae0c048009fecac2a70b not found: ID does not exist" containerID="da95de997017674f5f725d2434c76b898c8454d351d2ae0c048009fecac2a70b" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.558351 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"da95de997017674f5f725d2434c76b898c8454d351d2ae0c048009fecac2a70b"} err="failed to get container status \"da95de997017674f5f725d2434c76b898c8454d351d2ae0c048009fecac2a70b\": rpc error: code = NotFound desc = could not find container \"da95de997017674f5f725d2434c76b898c8454d351d2ae0c048009fecac2a70b\": container with ID starting with da95de997017674f5f725d2434c76b898c8454d351d2ae0c048009fecac2a70b not found: ID does not exist" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.558369 4753 scope.go:117] "RemoveContainer" containerID="e1480ad953e1453491bfbacc844a038d09fdd5a675fd4ec79be9f3f750ee72ae" Jan 29 12:32:12 crc kubenswrapper[4753]: E0129 12:32:12.561256 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e1480ad953e1453491bfbacc844a038d09fdd5a675fd4ec79be9f3f750ee72ae\": container with ID starting with e1480ad953e1453491bfbacc844a038d09fdd5a675fd4ec79be9f3f750ee72ae not found: ID does not exist" containerID="e1480ad953e1453491bfbacc844a038d09fdd5a675fd4ec79be9f3f750ee72ae" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.561299 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e1480ad953e1453491bfbacc844a038d09fdd5a675fd4ec79be9f3f750ee72ae"} err="failed to get container status \"e1480ad953e1453491bfbacc844a038d09fdd5a675fd4ec79be9f3f750ee72ae\": rpc error: code = NotFound desc = could not find container \"e1480ad953e1453491bfbacc844a038d09fdd5a675fd4ec79be9f3f750ee72ae\": container with ID starting with e1480ad953e1453491bfbacc844a038d09fdd5a675fd4ec79be9f3f750ee72ae not found: ID does not exist" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.561326 4753 scope.go:117] 
"RemoveContainer" containerID="f2e02b2ab58bd62d93b270812546264b3e9210ccac10d6b3ed2c1080f1dba78d" Jan 29 12:32:12 crc kubenswrapper[4753]: E0129 12:32:12.563637 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f2e02b2ab58bd62d93b270812546264b3e9210ccac10d6b3ed2c1080f1dba78d\": container with ID starting with f2e02b2ab58bd62d93b270812546264b3e9210ccac10d6b3ed2c1080f1dba78d not found: ID does not exist" containerID="f2e02b2ab58bd62d93b270812546264b3e9210ccac10d6b3ed2c1080f1dba78d" Jan 29 12:32:12 crc kubenswrapper[4753]: I0129 12:32:12.563667 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f2e02b2ab58bd62d93b270812546264b3e9210ccac10d6b3ed2c1080f1dba78d"} err="failed to get container status \"f2e02b2ab58bd62d93b270812546264b3e9210ccac10d6b3ed2c1080f1dba78d\": rpc error: code = NotFound desc = could not find container \"f2e02b2ab58bd62d93b270812546264b3e9210ccac10d6b3ed2c1080f1dba78d\": container with ID starting with f2e02b2ab58bd62d93b270812546264b3e9210ccac10d6b3ed2c1080f1dba78d not found: ID does not exist" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.891196 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/swift-storage-0"] Jan 29 12:32:14 crc kubenswrapper[4753]: E0129 12:32:14.893703 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="account-auditor" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.893749 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="account-auditor" Jan 29 12:32:14 crc kubenswrapper[4753]: E0129 12:32:14.893772 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="swift-recon-cron" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.893783 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="swift-recon-cron" Jan 29 12:32:14 crc kubenswrapper[4753]: E0129 12:32:14.893797 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cec6584a-fcc6-4b1d-8516-c2e6b2194048" containerName="proxy-httpd" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.893806 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="cec6584a-fcc6-4b1d-8516-c2e6b2194048" containerName="proxy-httpd" Jan 29 12:32:14 crc kubenswrapper[4753]: E0129 12:32:14.893815 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="object-expirer" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.893830 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="object-expirer" Jan 29 12:32:14 crc kubenswrapper[4753]: E0129 12:32:14.893841 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="swift-recon-cron" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.893848 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="swift-recon-cron" Jan 29 12:32:14 crc kubenswrapper[4753]: E0129 12:32:14.893869 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="container-server" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 
12:32:14.893876 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="container-server" Jan 29 12:32:14 crc kubenswrapper[4753]: E0129 12:32:14.893887 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="object-replicator" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.893895 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="object-replicator" Jan 29 12:32:14 crc kubenswrapper[4753]: E0129 12:32:14.893903 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="rsync" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.893912 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="rsync" Jan 29 12:32:14 crc kubenswrapper[4753]: E0129 12:32:14.893923 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="container-updater" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.893930 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="container-updater" Jan 29 12:32:14 crc kubenswrapper[4753]: E0129 12:32:14.893944 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="container-server" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.893953 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="container-server" Jan 29 12:32:14 crc kubenswrapper[4753]: E0129 12:32:14.893960 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="object-server" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.893967 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="object-server" Jan 29 12:32:14 crc kubenswrapper[4753]: E0129 12:32:14.893978 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="account-server" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.893985 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="account-server" Jan 29 12:32:14 crc kubenswrapper[4753]: E0129 12:32:14.893995 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="account-reaper" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.894001 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="account-reaper" Jan 29 12:32:14 crc kubenswrapper[4753]: E0129 12:32:14.894011 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="container-updater" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.894018 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="container-updater" Jan 29 12:32:14 crc kubenswrapper[4753]: E0129 12:32:14.894034 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="object-auditor" Jan 29 12:32:14 crc kubenswrapper[4753]: 
I0129 12:32:14.894041 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="object-auditor" Jan 29 12:32:14 crc kubenswrapper[4753]: E0129 12:32:14.894050 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="container-replicator" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.894056 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="container-replicator" Jan 29 12:32:14 crc kubenswrapper[4753]: E0129 12:32:14.894069 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="account-server" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.894076 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="account-server" Jan 29 12:32:14 crc kubenswrapper[4753]: E0129 12:32:14.894087 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="container-updater" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.894095 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="container-updater" Jan 29 12:32:14 crc kubenswrapper[4753]: E0129 12:32:14.894110 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="object-server" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.894125 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="object-server" Jan 29 12:32:14 crc kubenswrapper[4753]: E0129 12:32:14.894135 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="object-expirer" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.894142 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="object-expirer" Jan 29 12:32:14 crc kubenswrapper[4753]: E0129 12:32:14.894152 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="object-updater" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.894160 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="object-updater" Jan 29 12:32:14 crc kubenswrapper[4753]: E0129 12:32:14.894175 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e4000d05-91fb-44b6-a38f-afa7b616dd8b" containerName="swift-ring-rebalance" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.894182 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="e4000d05-91fb-44b6-a38f-afa7b616dd8b" containerName="swift-ring-rebalance" Jan 29 12:32:14 crc kubenswrapper[4753]: E0129 12:32:14.894197 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="container-auditor" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.894204 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="container-auditor" Jan 29 12:32:14 crc kubenswrapper[4753]: E0129 12:32:14.894219 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="container-auditor" Jan 
29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.894246 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="container-auditor" Jan 29 12:32:14 crc kubenswrapper[4753]: E0129 12:32:14.894259 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="object-auditor" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.894266 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="object-auditor" Jan 29 12:32:14 crc kubenswrapper[4753]: E0129 12:32:14.894277 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="account-auditor" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.894284 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="account-auditor" Jan 29 12:32:14 crc kubenswrapper[4753]: E0129 12:32:14.894296 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="object-updater" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.894303 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="object-updater" Jan 29 12:32:14 crc kubenswrapper[4753]: E0129 12:32:14.894314 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="account-replicator" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.894323 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="account-replicator" Jan 29 12:32:14 crc kubenswrapper[4753]: E0129 12:32:14.894348 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="account-server" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.894356 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="account-server" Jan 29 12:32:14 crc kubenswrapper[4753]: E0129 12:32:14.894367 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="object-updater" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.894374 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="object-updater" Jan 29 12:32:14 crc kubenswrapper[4753]: E0129 12:32:14.894394 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="account-reaper" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.894403 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="account-reaper" Jan 29 12:32:14 crc kubenswrapper[4753]: E0129 12:32:14.894412 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="rsync" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.894419 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="rsync" Jan 29 12:32:14 crc kubenswrapper[4753]: E0129 12:32:14.894428 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="object-expirer" Jan 29 
12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.894434 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="object-expirer" Jan 29 12:32:14 crc kubenswrapper[4753]: E0129 12:32:14.894444 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="account-auditor" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.894451 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="account-auditor" Jan 29 12:32:14 crc kubenswrapper[4753]: E0129 12:32:14.894459 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="container-server" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.894467 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="container-server" Jan 29 12:32:14 crc kubenswrapper[4753]: E0129 12:32:14.894492 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="container-auditor" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.894502 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="container-auditor" Jan 29 12:32:14 crc kubenswrapper[4753]: E0129 12:32:14.894511 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="object-auditor" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.894518 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="object-auditor" Jan 29 12:32:14 crc kubenswrapper[4753]: E0129 12:32:14.894529 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="account-reaper" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.894538 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="account-reaper" Jan 29 12:32:14 crc kubenswrapper[4753]: E0129 12:32:14.894552 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="object-replicator" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.894559 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="object-replicator" Jan 29 12:32:14 crc kubenswrapper[4753]: E0129 12:32:14.894567 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="object-replicator" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.894574 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="object-replicator" Jan 29 12:32:14 crc kubenswrapper[4753]: E0129 12:32:14.894583 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="container-replicator" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.894591 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="container-replicator" Jan 29 12:32:14 crc kubenswrapper[4753]: E0129 12:32:14.894615 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" 
containerName="rsync" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.894624 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="rsync" Jan 29 12:32:14 crc kubenswrapper[4753]: E0129 12:32:14.894634 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="container-replicator" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.894641 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="container-replicator" Jan 29 12:32:14 crc kubenswrapper[4753]: E0129 12:32:14.894655 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cec6584a-fcc6-4b1d-8516-c2e6b2194048" containerName="proxy-server" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.894665 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="cec6584a-fcc6-4b1d-8516-c2e6b2194048" containerName="proxy-server" Jan 29 12:32:14 crc kubenswrapper[4753]: E0129 12:32:14.894679 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="object-server" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.894687 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="object-server" Jan 29 12:32:14 crc kubenswrapper[4753]: E0129 12:32:14.894700 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="account-replicator" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.894709 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="account-replicator" Jan 29 12:32:14 crc kubenswrapper[4753]: E0129 12:32:14.894723 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="account-replicator" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.894730 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="account-replicator" Jan 29 12:32:14 crc kubenswrapper[4753]: E0129 12:32:14.894739 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="swift-recon-cron" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.894746 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="swift-recon-cron" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.894996 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="container-auditor" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.895023 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="account-auditor" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.895031 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="container-auditor" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.895044 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="object-expirer" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.895057 4753 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="container-server" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.895066 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="container-server" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.895078 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="swift-recon-cron" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.895089 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="container-server" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.895101 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="container-replicator" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.895113 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="swift-recon-cron" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.895124 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="object-server" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.895134 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="account-server" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.895145 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="account-reaper" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.895156 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="object-server" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.895164 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="object-auditor" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.895178 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="container-updater" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.895186 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="e4000d05-91fb-44b6-a38f-afa7b616dd8b" containerName="swift-ring-rebalance" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.895194 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="account-replicator" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.895205 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="object-updater" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.895213 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="cec6584a-fcc6-4b1d-8516-c2e6b2194048" containerName="proxy-server" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.895237 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="account-reaper" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.895252 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="container-updater" Jan 29 12:32:14 
crc kubenswrapper[4753]: I0129 12:32:14.895262 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="account-auditor" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.895270 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="account-replicator" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.895283 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="object-expirer" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.895292 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="object-auditor" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.895300 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="rsync" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.895313 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="swift-recon-cron" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.895323 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="object-expirer" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.895331 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="account-server" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.895340 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="account-replicator" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.895350 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="rsync" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.895361 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="account-reaper" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.895370 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="container-auditor" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.895380 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="object-auditor" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.895391 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="rsync" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.895405 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="object-updater" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.895418 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="container-replicator" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.895429 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f186cee-89fe-4c25-b227-628ffcb2a98f" containerName="object-replicator" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.895440 4753 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="account-auditor" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.895449 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="object-replicator" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.895461 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="object-updater" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.895471 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="object-server" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.895480 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="object-replicator" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.895490 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="container-updater" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.895500 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="cec6584a-fcc6-4b1d-8516-c2e6b2194048" containerName="proxy-httpd" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.895509 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb9fbcbf-3cdc-449f-9754-7d54c5f17aa1" containerName="account-server" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.895520 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="cdf821d7-7cec-4c33-a59e-1c6493fd7281" containerName="container-replicator" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.901396 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.903776 4753 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"swift-swift-dockercfg-dc6t9" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.908485 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"swift-kuttl-tests"/"swift-ring-files" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.908861 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"swift-kuttl-tests"/"swift-storage-config-data" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.909325 4753 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"swift-conf" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.941053 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-storage-0"] Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.950501 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-rkchn"] Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.951469 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-rkchn" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.953696 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"swift-kuttl-tests"/"swift-ring-config-data" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.953812 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"swift-kuttl-tests"/"swift-ring-scripts" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.954145 4753 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"swift-proxy-config-data" Jan 29 12:32:14 crc kubenswrapper[4753]: I0129 12:32:14.984193 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-rkchn"] Jan 29 12:32:15 crc kubenswrapper[4753]: I0129 12:32:15.078186 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"swift-storage-0\" (UID: \"73d3c43c-a487-4cbe-a709-833d3814f63a\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:32:15 crc kubenswrapper[4753]: I0129 12:32:15.078934 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/5188a03d-cba0-4bbb-9c43-5f1ff77cf44d-ring-data-devices\") pod \"swift-ring-rebalance-rkchn\" (UID: \"5188a03d-cba0-4bbb-9c43-5f1ff77cf44d\") " pod="swift-kuttl-tests/swift-ring-rebalance-rkchn" Jan 29 12:32:15 crc kubenswrapper[4753]: I0129 12:32:15.079054 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/5188a03d-cba0-4bbb-9c43-5f1ff77cf44d-dispersionconf\") pod \"swift-ring-rebalance-rkchn\" (UID: \"5188a03d-cba0-4bbb-9c43-5f1ff77cf44d\") " pod="swift-kuttl-tests/swift-ring-rebalance-rkchn" Jan 29 12:32:15 crc kubenswrapper[4753]: I0129 12:32:15.079163 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/5188a03d-cba0-4bbb-9c43-5f1ff77cf44d-swiftconf\") pod \"swift-ring-rebalance-rkchn\" (UID: \"5188a03d-cba0-4bbb-9c43-5f1ff77cf44d\") " pod="swift-kuttl-tests/swift-ring-rebalance-rkchn" Jan 29 12:32:15 crc kubenswrapper[4753]: I0129 12:32:15.079276 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/73d3c43c-a487-4cbe-a709-833d3814f63a-lock\") pod \"swift-storage-0\" (UID: \"73d3c43c-a487-4cbe-a709-833d3814f63a\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:32:15 crc kubenswrapper[4753]: I0129 12:32:15.079391 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5188a03d-cba0-4bbb-9c43-5f1ff77cf44d-scripts\") pod \"swift-ring-rebalance-rkchn\" (UID: \"5188a03d-cba0-4bbb-9c43-5f1ff77cf44d\") " pod="swift-kuttl-tests/swift-ring-rebalance-rkchn" Jan 29 12:32:15 crc kubenswrapper[4753]: I0129 12:32:15.079478 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jn48d\" (UniqueName: \"kubernetes.io/projected/5188a03d-cba0-4bbb-9c43-5f1ff77cf44d-kube-api-access-jn48d\") pod \"swift-ring-rebalance-rkchn\" (UID: \"5188a03d-cba0-4bbb-9c43-5f1ff77cf44d\") " 
pod="swift-kuttl-tests/swift-ring-rebalance-rkchn" Jan 29 12:32:15 crc kubenswrapper[4753]: I0129 12:32:15.079586 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rhngw\" (UniqueName: \"kubernetes.io/projected/73d3c43c-a487-4cbe-a709-833d3814f63a-kube-api-access-rhngw\") pod \"swift-storage-0\" (UID: \"73d3c43c-a487-4cbe-a709-833d3814f63a\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:32:15 crc kubenswrapper[4753]: I0129 12:32:15.079685 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/73d3c43c-a487-4cbe-a709-833d3814f63a-etc-swift\") pod \"swift-storage-0\" (UID: \"73d3c43c-a487-4cbe-a709-833d3814f63a\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:32:15 crc kubenswrapper[4753]: I0129 12:32:15.079765 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/5188a03d-cba0-4bbb-9c43-5f1ff77cf44d-etc-swift\") pod \"swift-ring-rebalance-rkchn\" (UID: \"5188a03d-cba0-4bbb-9c43-5f1ff77cf44d\") " pod="swift-kuttl-tests/swift-ring-rebalance-rkchn" Jan 29 12:32:15 crc kubenswrapper[4753]: I0129 12:32:15.079843 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/73d3c43c-a487-4cbe-a709-833d3814f63a-cache\") pod \"swift-storage-0\" (UID: \"73d3c43c-a487-4cbe-a709-833d3814f63a\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:32:15 crc kubenswrapper[4753]: I0129 12:32:15.236072 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rhngw\" (UniqueName: \"kubernetes.io/projected/73d3c43c-a487-4cbe-a709-833d3814f63a-kube-api-access-rhngw\") pod \"swift-storage-0\" (UID: \"73d3c43c-a487-4cbe-a709-833d3814f63a\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:32:15 crc kubenswrapper[4753]: I0129 12:32:15.236150 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/73d3c43c-a487-4cbe-a709-833d3814f63a-etc-swift\") pod \"swift-storage-0\" (UID: \"73d3c43c-a487-4cbe-a709-833d3814f63a\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:32:15 crc kubenswrapper[4753]: I0129 12:32:15.236187 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/5188a03d-cba0-4bbb-9c43-5f1ff77cf44d-etc-swift\") pod \"swift-ring-rebalance-rkchn\" (UID: \"5188a03d-cba0-4bbb-9c43-5f1ff77cf44d\") " pod="swift-kuttl-tests/swift-ring-rebalance-rkchn" Jan 29 12:32:15 crc kubenswrapper[4753]: I0129 12:32:15.236215 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/73d3c43c-a487-4cbe-a709-833d3814f63a-cache\") pod \"swift-storage-0\" (UID: \"73d3c43c-a487-4cbe-a709-833d3814f63a\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:32:15 crc kubenswrapper[4753]: I0129 12:32:15.236343 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"swift-storage-0\" (UID: \"73d3c43c-a487-4cbe-a709-833d3814f63a\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:32:15 crc kubenswrapper[4753]: I0129 12:32:15.236409 4753 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/5188a03d-cba0-4bbb-9c43-5f1ff77cf44d-ring-data-devices\") pod \"swift-ring-rebalance-rkchn\" (UID: \"5188a03d-cba0-4bbb-9c43-5f1ff77cf44d\") " pod="swift-kuttl-tests/swift-ring-rebalance-rkchn" Jan 29 12:32:15 crc kubenswrapper[4753]: I0129 12:32:15.236435 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/5188a03d-cba0-4bbb-9c43-5f1ff77cf44d-dispersionconf\") pod \"swift-ring-rebalance-rkchn\" (UID: \"5188a03d-cba0-4bbb-9c43-5f1ff77cf44d\") " pod="swift-kuttl-tests/swift-ring-rebalance-rkchn" Jan 29 12:32:15 crc kubenswrapper[4753]: I0129 12:32:15.236497 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/5188a03d-cba0-4bbb-9c43-5f1ff77cf44d-swiftconf\") pod \"swift-ring-rebalance-rkchn\" (UID: \"5188a03d-cba0-4bbb-9c43-5f1ff77cf44d\") " pod="swift-kuttl-tests/swift-ring-rebalance-rkchn" Jan 29 12:32:15 crc kubenswrapper[4753]: I0129 12:32:15.236610 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/73d3c43c-a487-4cbe-a709-833d3814f63a-lock\") pod \"swift-storage-0\" (UID: \"73d3c43c-a487-4cbe-a709-833d3814f63a\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:32:15 crc kubenswrapper[4753]: I0129 12:32:15.236709 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5188a03d-cba0-4bbb-9c43-5f1ff77cf44d-scripts\") pod \"swift-ring-rebalance-rkchn\" (UID: \"5188a03d-cba0-4bbb-9c43-5f1ff77cf44d\") " pod="swift-kuttl-tests/swift-ring-rebalance-rkchn" Jan 29 12:32:15 crc kubenswrapper[4753]: I0129 12:32:15.236738 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jn48d\" (UniqueName: \"kubernetes.io/projected/5188a03d-cba0-4bbb-9c43-5f1ff77cf44d-kube-api-access-jn48d\") pod \"swift-ring-rebalance-rkchn\" (UID: \"5188a03d-cba0-4bbb-9c43-5f1ff77cf44d\") " pod="swift-kuttl-tests/swift-ring-rebalance-rkchn" Jan 29 12:32:15 crc kubenswrapper[4753]: I0129 12:32:15.236907 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/73d3c43c-a487-4cbe-a709-833d3814f63a-cache\") pod \"swift-storage-0\" (UID: \"73d3c43c-a487-4cbe-a709-833d3814f63a\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:32:15 crc kubenswrapper[4753]: E0129 12:32:15.237030 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found Jan 29 12:32:15 crc kubenswrapper[4753]: E0129 12:32:15.237056 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-storage-0: configmap "swift-ring-files" not found Jan 29 12:32:15 crc kubenswrapper[4753]: E0129 12:32:15.237131 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/73d3c43c-a487-4cbe-a709-833d3814f63a-etc-swift podName:73d3c43c-a487-4cbe-a709-833d3814f63a nodeName:}" failed. No retries permitted until 2026-01-29 12:32:15.73709311 +0000 UTC m=+1549.989174565 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/73d3c43c-a487-4cbe-a709-833d3814f63a-etc-swift") pod "swift-storage-0" (UID: "73d3c43c-a487-4cbe-a709-833d3814f63a") : configmap "swift-ring-files" not found Jan 29 12:32:15 crc kubenswrapper[4753]: I0129 12:32:15.237428 4753 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"swift-storage-0\" (UID: \"73d3c43c-a487-4cbe-a709-833d3814f63a\") device mount path \"/mnt/openstack/pv04\"" pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:32:15 crc kubenswrapper[4753]: I0129 12:32:15.237677 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/5188a03d-cba0-4bbb-9c43-5f1ff77cf44d-etc-swift\") pod \"swift-ring-rebalance-rkchn\" (UID: \"5188a03d-cba0-4bbb-9c43-5f1ff77cf44d\") " pod="swift-kuttl-tests/swift-ring-rebalance-rkchn" Jan 29 12:32:15 crc kubenswrapper[4753]: I0129 12:32:15.238807 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/73d3c43c-a487-4cbe-a709-833d3814f63a-lock\") pod \"swift-storage-0\" (UID: \"73d3c43c-a487-4cbe-a709-833d3814f63a\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:32:15 crc kubenswrapper[4753]: I0129 12:32:15.239339 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5188a03d-cba0-4bbb-9c43-5f1ff77cf44d-scripts\") pod \"swift-ring-rebalance-rkchn\" (UID: \"5188a03d-cba0-4bbb-9c43-5f1ff77cf44d\") " pod="swift-kuttl-tests/swift-ring-rebalance-rkchn" Jan 29 12:32:15 crc kubenswrapper[4753]: I0129 12:32:15.239539 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/5188a03d-cba0-4bbb-9c43-5f1ff77cf44d-ring-data-devices\") pod \"swift-ring-rebalance-rkchn\" (UID: \"5188a03d-cba0-4bbb-9c43-5f1ff77cf44d\") " pod="swift-kuttl-tests/swift-ring-rebalance-rkchn" Jan 29 12:32:15 crc kubenswrapper[4753]: I0129 12:32:15.243576 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/5188a03d-cba0-4bbb-9c43-5f1ff77cf44d-dispersionconf\") pod \"swift-ring-rebalance-rkchn\" (UID: \"5188a03d-cba0-4bbb-9c43-5f1ff77cf44d\") " pod="swift-kuttl-tests/swift-ring-rebalance-rkchn" Jan 29 12:32:15 crc kubenswrapper[4753]: I0129 12:32:15.246910 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/5188a03d-cba0-4bbb-9c43-5f1ff77cf44d-swiftconf\") pod \"swift-ring-rebalance-rkchn\" (UID: \"5188a03d-cba0-4bbb-9c43-5f1ff77cf44d\") " pod="swift-kuttl-tests/swift-ring-rebalance-rkchn" Jan 29 12:32:15 crc kubenswrapper[4753]: I0129 12:32:15.268113 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rhngw\" (UniqueName: \"kubernetes.io/projected/73d3c43c-a487-4cbe-a709-833d3814f63a-kube-api-access-rhngw\") pod \"swift-storage-0\" (UID: \"73d3c43c-a487-4cbe-a709-833d3814f63a\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:32:15 crc kubenswrapper[4753]: I0129 12:32:15.269844 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jn48d\" (UniqueName: \"kubernetes.io/projected/5188a03d-cba0-4bbb-9c43-5f1ff77cf44d-kube-api-access-jn48d\") pod \"swift-ring-rebalance-rkchn\" (UID: 
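The etc-swift failure above comes from a projected volume with a configMap source: the swift-ring-files ConfigMap does not exist yet, and a projected volume cannot partially succeed, so the whole SetUp waits until the ConfigMap appears (later produced by the swift-ring-rebalance job). A small Go sketch of that all-or-nothing dependency, with a plain map standing in for the API server's view (an assumption of the sketch, not kubelet code):

    package main

    import "fmt"

    // configMaps stands in for the ConfigMaps visible in the namespace;
    // "swift-ring-files" is deliberately absent, as in the log.
    var configMaps = map[string]map[string]string{}

    // setUpProjected fails if any source is missing, mirroring:
    //   Couldn't get configMap ...: configmap "swift-ring-files" not found
    func setUpProjected(sources []string) error {
    	for _, name := range sources {
    		if _, ok := configMaps[name]; !ok {
    			return fmt.Errorf("configmap %q not found", name)
    		}
    	}
    	return nil
    }

    func main() {
    	if err := setUpProjected([]string{"swift-ring-files"}); err != nil {
    		fmt.Println(`MountVolume.SetUp failed for volume "etc-swift":`, err)
    	}
    }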
\"5188a03d-cba0-4bbb-9c43-5f1ff77cf44d\") " pod="swift-kuttl-tests/swift-ring-rebalance-rkchn" Jan 29 12:32:15 crc kubenswrapper[4753]: I0129 12:32:15.276932 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"swift-storage-0\" (UID: \"73d3c43c-a487-4cbe-a709-833d3814f63a\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:32:15 crc kubenswrapper[4753]: I0129 12:32:15.413360 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/swift-proxy-8695cf79dc-6tgzq"] Jan 29 12:32:15 crc kubenswrapper[4753]: I0129 12:32:15.414854 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-proxy-8695cf79dc-6tgzq" Jan 29 12:32:15 crc kubenswrapper[4753]: I0129 12:32:15.428919 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-proxy-8695cf79dc-6tgzq"] Jan 29 12:32:15 crc kubenswrapper[4753]: I0129 12:32:15.450916 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5cd837fc-3257-446b-a556-381501bf95da-log-httpd\") pod \"swift-proxy-8695cf79dc-6tgzq\" (UID: \"5cd837fc-3257-446b-a556-381501bf95da\") " pod="swift-kuttl-tests/swift-proxy-8695cf79dc-6tgzq" Jan 29 12:32:15 crc kubenswrapper[4753]: I0129 12:32:15.451015 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sb8vv\" (UniqueName: \"kubernetes.io/projected/5cd837fc-3257-446b-a556-381501bf95da-kube-api-access-sb8vv\") pod \"swift-proxy-8695cf79dc-6tgzq\" (UID: \"5cd837fc-3257-446b-a556-381501bf95da\") " pod="swift-kuttl-tests/swift-proxy-8695cf79dc-6tgzq" Jan 29 12:32:15 crc kubenswrapper[4753]: I0129 12:32:15.451050 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5cd837fc-3257-446b-a556-381501bf95da-run-httpd\") pod \"swift-proxy-8695cf79dc-6tgzq\" (UID: \"5cd837fc-3257-446b-a556-381501bf95da\") " pod="swift-kuttl-tests/swift-proxy-8695cf79dc-6tgzq" Jan 29 12:32:15 crc kubenswrapper[4753]: I0129 12:32:15.451078 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/5cd837fc-3257-446b-a556-381501bf95da-etc-swift\") pod \"swift-proxy-8695cf79dc-6tgzq\" (UID: \"5cd837fc-3257-446b-a556-381501bf95da\") " pod="swift-kuttl-tests/swift-proxy-8695cf79dc-6tgzq" Jan 29 12:32:15 crc kubenswrapper[4753]: I0129 12:32:15.451104 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5cd837fc-3257-446b-a556-381501bf95da-config-data\") pod \"swift-proxy-8695cf79dc-6tgzq\" (UID: \"5cd837fc-3257-446b-a556-381501bf95da\") " pod="swift-kuttl-tests/swift-proxy-8695cf79dc-6tgzq" Jan 29 12:32:15 crc kubenswrapper[4753]: I0129 12:32:15.585104 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5cd837fc-3257-446b-a556-381501bf95da-log-httpd\") pod \"swift-proxy-8695cf79dc-6tgzq\" (UID: \"5cd837fc-3257-446b-a556-381501bf95da\") " pod="swift-kuttl-tests/swift-proxy-8695cf79dc-6tgzq" Jan 29 12:32:15 crc kubenswrapper[4753]: I0129 12:32:15.585200 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-sb8vv\" (UniqueName: \"kubernetes.io/projected/5cd837fc-3257-446b-a556-381501bf95da-kube-api-access-sb8vv\") pod \"swift-proxy-8695cf79dc-6tgzq\" (UID: \"5cd837fc-3257-446b-a556-381501bf95da\") " pod="swift-kuttl-tests/swift-proxy-8695cf79dc-6tgzq" Jan 29 12:32:15 crc kubenswrapper[4753]: I0129 12:32:15.585247 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5cd837fc-3257-446b-a556-381501bf95da-run-httpd\") pod \"swift-proxy-8695cf79dc-6tgzq\" (UID: \"5cd837fc-3257-446b-a556-381501bf95da\") " pod="swift-kuttl-tests/swift-proxy-8695cf79dc-6tgzq" Jan 29 12:32:15 crc kubenswrapper[4753]: I0129 12:32:15.585282 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/5cd837fc-3257-446b-a556-381501bf95da-etc-swift\") pod \"swift-proxy-8695cf79dc-6tgzq\" (UID: \"5cd837fc-3257-446b-a556-381501bf95da\") " pod="swift-kuttl-tests/swift-proxy-8695cf79dc-6tgzq" Jan 29 12:32:15 crc kubenswrapper[4753]: I0129 12:32:15.585310 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5cd837fc-3257-446b-a556-381501bf95da-config-data\") pod \"swift-proxy-8695cf79dc-6tgzq\" (UID: \"5cd837fc-3257-446b-a556-381501bf95da\") " pod="swift-kuttl-tests/swift-proxy-8695cf79dc-6tgzq" Jan 29 12:32:15 crc kubenswrapper[4753]: I0129 12:32:15.586190 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5cd837fc-3257-446b-a556-381501bf95da-log-httpd\") pod \"swift-proxy-8695cf79dc-6tgzq\" (UID: \"5cd837fc-3257-446b-a556-381501bf95da\") " pod="swift-kuttl-tests/swift-proxy-8695cf79dc-6tgzq" Jan 29 12:32:15 crc kubenswrapper[4753]: I0129 12:32:15.586373 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-rkchn" Jan 29 12:32:15 crc kubenswrapper[4753]: E0129 12:32:15.586814 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found Jan 29 12:32:15 crc kubenswrapper[4753]: E0129 12:32:15.586853 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-proxy-8695cf79dc-6tgzq: configmap "swift-ring-files" not found Jan 29 12:32:15 crc kubenswrapper[4753]: E0129 12:32:15.586934 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/5cd837fc-3257-446b-a556-381501bf95da-etc-swift podName:5cd837fc-3257-446b-a556-381501bf95da nodeName:}" failed. No retries permitted until 2026-01-29 12:32:16.086900146 +0000 UTC m=+1550.338981691 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/5cd837fc-3257-446b-a556-381501bf95da-etc-swift") pod "swift-proxy-8695cf79dc-6tgzq" (UID: "5cd837fc-3257-446b-a556-381501bf95da") : configmap "swift-ring-files" not found Jan 29 12:32:15 crc kubenswrapper[4753]: I0129 12:32:15.587033 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5cd837fc-3257-446b-a556-381501bf95da-run-httpd\") pod \"swift-proxy-8695cf79dc-6tgzq\" (UID: \"5cd837fc-3257-446b-a556-381501bf95da\") " pod="swift-kuttl-tests/swift-proxy-8695cf79dc-6tgzq" Jan 29 12:32:15 crc kubenswrapper[4753]: I0129 12:32:15.593239 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5cd837fc-3257-446b-a556-381501bf95da-config-data\") pod \"swift-proxy-8695cf79dc-6tgzq\" (UID: \"5cd837fc-3257-446b-a556-381501bf95da\") " pod="swift-kuttl-tests/swift-proxy-8695cf79dc-6tgzq" Jan 29 12:32:15 crc kubenswrapper[4753]: I0129 12:32:15.622522 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sb8vv\" (UniqueName: \"kubernetes.io/projected/5cd837fc-3257-446b-a556-381501bf95da-kube-api-access-sb8vv\") pod \"swift-proxy-8695cf79dc-6tgzq\" (UID: \"5cd837fc-3257-446b-a556-381501bf95da\") " pod="swift-kuttl-tests/swift-proxy-8695cf79dc-6tgzq" Jan 29 12:32:15 crc kubenswrapper[4753]: I0129 12:32:15.788975 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/73d3c43c-a487-4cbe-a709-833d3814f63a-etc-swift\") pod \"swift-storage-0\" (UID: \"73d3c43c-a487-4cbe-a709-833d3814f63a\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:32:15 crc kubenswrapper[4753]: E0129 12:32:15.789177 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found Jan 29 12:32:15 crc kubenswrapper[4753]: E0129 12:32:15.789537 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-storage-0: configmap "swift-ring-files" not found Jan 29 12:32:15 crc kubenswrapper[4753]: E0129 12:32:15.789608 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/73d3c43c-a487-4cbe-a709-833d3814f63a-etc-swift podName:73d3c43c-a487-4cbe-a709-833d3814f63a nodeName:}" failed. No retries permitted until 2026-01-29 12:32:16.789585444 +0000 UTC m=+1551.041666899 (durationBeforeRetry 1s). 
Jan 29 12:32:16 crc kubenswrapper[4753]: I0129 12:32:16.134551 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/5cd837fc-3257-446b-a556-381501bf95da-etc-swift\") pod \"swift-proxy-8695cf79dc-6tgzq\" (UID: \"5cd837fc-3257-446b-a556-381501bf95da\") " pod="swift-kuttl-tests/swift-proxy-8695cf79dc-6tgzq"
Jan 29 12:32:16 crc kubenswrapper[4753]: E0129 12:32:16.134872 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found
Jan 29 12:32:16 crc kubenswrapper[4753]: E0129 12:32:16.134887 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-proxy-8695cf79dc-6tgzq: configmap "swift-ring-files" not found
Jan 29 12:32:16 crc kubenswrapper[4753]: E0129 12:32:16.134935 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/5cd837fc-3257-446b-a556-381501bf95da-etc-swift podName:5cd837fc-3257-446b-a556-381501bf95da nodeName:}" failed. No retries permitted until 2026-01-29 12:32:17.134918624 +0000 UTC m=+1551.387000079 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/5cd837fc-3257-446b-a556-381501bf95da-etc-swift") pod "swift-proxy-8695cf79dc-6tgzq" (UID: "5cd837fc-3257-446b-a556-381501bf95da") : configmap "swift-ring-files" not found
Jan 29 12:32:16 crc kubenswrapper[4753]: I0129 12:32:16.192366 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-rkchn"]
Jan 29 12:32:16 crc kubenswrapper[4753]: I0129 12:32:16.712749 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-ring-rebalance-rkchn" event={"ID":"5188a03d-cba0-4bbb-9c43-5f1ff77cf44d","Type":"ContainerStarted","Data":"23da8d2b8648a062ac86a655913b68baddecb5237d38120749304817382bdf11"}
Jan 29 12:32:16 crc kubenswrapper[4753]: I0129 12:32:16.713091 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-ring-rebalance-rkchn" event={"ID":"5188a03d-cba0-4bbb-9c43-5f1ff77cf44d","Type":"ContainerStarted","Data":"7a1f0f280a453a95e15e12c64bbbc76b3fbcabcfd5d83c4c4774b4e53c23950b"}
Jan 29 12:32:16 crc kubenswrapper[4753]: I0129 12:32:16.736326 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="swift-kuttl-tests/swift-ring-rebalance-rkchn" podStartSLOduration=2.736280638 podStartE2EDuration="2.736280638s" podCreationTimestamp="2026-01-29 12:32:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:32:16.730997438 +0000 UTC m=+1550.983078893" watchObservedRunningTime="2026-01-29 12:32:16.736280638 +0000 UTC m=+1550.988362083"
Jan 29 12:32:16 crc kubenswrapper[4753]: I0129 12:32:16.886542 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/73d3c43c-a487-4cbe-a709-833d3814f63a-etc-swift\") pod \"swift-storage-0\" (UID: \"73d3c43c-a487-4cbe-a709-833d3814f63a\") " pod="swift-kuttl-tests/swift-storage-0"
Jan 29 12:32:16 crc kubenswrapper[4753]: E0129 12:32:16.886833 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found
Jan 29 12:32:16 crc kubenswrapper[4753]: E0129 12:32:16.886888 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-storage-0: configmap "swift-ring-files" not found
Jan 29 12:32:16 crc kubenswrapper[4753]: E0129 12:32:16.886965 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/73d3c43c-a487-4cbe-a709-833d3814f63a-etc-swift podName:73d3c43c-a487-4cbe-a709-833d3814f63a nodeName:}" failed. No retries permitted until 2026-01-29 12:32:18.886942498 +0000 UTC m=+1553.139023953 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/73d3c43c-a487-4cbe-a709-833d3814f63a-etc-swift") pod "swift-storage-0" (UID: "73d3c43c-a487-4cbe-a709-833d3814f63a") : configmap "swift-ring-files" not found
Jan 29 12:32:17 crc kubenswrapper[4753]: I0129 12:32:17.267645 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/5cd837fc-3257-446b-a556-381501bf95da-etc-swift\") pod \"swift-proxy-8695cf79dc-6tgzq\" (UID: \"5cd837fc-3257-446b-a556-381501bf95da\") " pod="swift-kuttl-tests/swift-proxy-8695cf79dc-6tgzq"
Jan 29 12:32:17 crc kubenswrapper[4753]: E0129 12:32:17.267868 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found
Jan 29 12:32:17 crc kubenswrapper[4753]: E0129 12:32:17.267886 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-proxy-8695cf79dc-6tgzq: configmap "swift-ring-files" not found
Jan 29 12:32:17 crc kubenswrapper[4753]: E0129 12:32:17.267939 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/5cd837fc-3257-446b-a556-381501bf95da-etc-swift podName:5cd837fc-3257-446b-a556-381501bf95da nodeName:}" failed. No retries permitted until 2026-01-29 12:32:19.267920449 +0000 UTC m=+1553.520001904 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/5cd837fc-3257-446b-a556-381501bf95da-etc-swift") pod "swift-proxy-8695cf79dc-6tgzq" (UID: "5cd837fc-3257-446b-a556-381501bf95da") : configmap "swift-ring-files" not found
Jan 29 12:32:18 crc kubenswrapper[4753]: I0129 12:32:18.908660 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/73d3c43c-a487-4cbe-a709-833d3814f63a-etc-swift\") pod \"swift-storage-0\" (UID: \"73d3c43c-a487-4cbe-a709-833d3814f63a\") " pod="swift-kuttl-tests/swift-storage-0"
Jan 29 12:32:18 crc kubenswrapper[4753]: E0129 12:32:18.908876 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found
Jan 29 12:32:18 crc kubenswrapper[4753]: E0129 12:32:18.909045 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-storage-0: configmap "swift-ring-files" not found
Jan 29 12:32:18 crc kubenswrapper[4753]: E0129 12:32:18.909109 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/73d3c43c-a487-4cbe-a709-833d3814f63a-etc-swift podName:73d3c43c-a487-4cbe-a709-833d3814f63a nodeName:}" failed. No retries permitted until 2026-01-29 12:32:22.909089122 +0000 UTC m=+1557.161170577 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/73d3c43c-a487-4cbe-a709-833d3814f63a-etc-swift") pod "swift-storage-0" (UID: "73d3c43c-a487-4cbe-a709-833d3814f63a") : configmap "swift-ring-files" not found
Jan 29 12:32:19 crc kubenswrapper[4753]: I0129 12:32:19.323011 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/5cd837fc-3257-446b-a556-381501bf95da-etc-swift\") pod \"swift-proxy-8695cf79dc-6tgzq\" (UID: \"5cd837fc-3257-446b-a556-381501bf95da\") " pod="swift-kuttl-tests/swift-proxy-8695cf79dc-6tgzq"
Jan 29 12:32:19 crc kubenswrapper[4753]: E0129 12:32:19.323291 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found
Jan 29 12:32:19 crc kubenswrapper[4753]: E0129 12:32:19.323319 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-proxy-8695cf79dc-6tgzq: configmap "swift-ring-files" not found
Jan 29 12:32:19 crc kubenswrapper[4753]: E0129 12:32:19.323392 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/5cd837fc-3257-446b-a556-381501bf95da-etc-swift podName:5cd837fc-3257-446b-a556-381501bf95da nodeName:}" failed. No retries permitted until 2026-01-29 12:32:23.323366841 +0000 UTC m=+1557.575448316 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/5cd837fc-3257-446b-a556-381501bf95da-etc-swift") pod "swift-proxy-8695cf79dc-6tgzq" (UID: "5cd837fc-3257-446b-a556-381501bf95da") : configmap "swift-ring-files" not found
Jan 29 12:32:22 crc kubenswrapper[4753]: I0129 12:32:22.994764 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/73d3c43c-a487-4cbe-a709-833d3814f63a-etc-swift\") pod \"swift-storage-0\" (UID: \"73d3c43c-a487-4cbe-a709-833d3814f63a\") " pod="swift-kuttl-tests/swift-storage-0"
Jan 29 12:32:22 crc kubenswrapper[4753]: E0129 12:32:22.995628 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found
Jan 29 12:32:22 crc kubenswrapper[4753]: E0129 12:32:22.995650 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-storage-0: configmap "swift-ring-files" not found
Jan 29 12:32:22 crc kubenswrapper[4753]: E0129 12:32:22.995708 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/73d3c43c-a487-4cbe-a709-833d3814f63a-etc-swift podName:73d3c43c-a487-4cbe-a709-833d3814f63a nodeName:}" failed. No retries permitted until 2026-01-29 12:32:30.995688994 +0000 UTC m=+1565.247770449 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/73d3c43c-a487-4cbe-a709-833d3814f63a-etc-swift") pod "swift-storage-0" (UID: "73d3c43c-a487-4cbe-a709-833d3814f63a") : configmap "swift-ring-files" not found
Jan 29 12:32:23 crc kubenswrapper[4753]: I0129 12:32:23.386820 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/5cd837fc-3257-446b-a556-381501bf95da-etc-swift\") pod \"swift-proxy-8695cf79dc-6tgzq\" (UID: \"5cd837fc-3257-446b-a556-381501bf95da\") " pod="swift-kuttl-tests/swift-proxy-8695cf79dc-6tgzq"
Jan 29 12:32:23 crc kubenswrapper[4753]: E0129 12:32:23.387120 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found
Jan 29 12:32:23 crc kubenswrapper[4753]: E0129 12:32:23.387135 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-proxy-8695cf79dc-6tgzq: configmap "swift-ring-files" not found
Jan 29 12:32:23 crc kubenswrapper[4753]: E0129 12:32:23.387193 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/5cd837fc-3257-446b-a556-381501bf95da-etc-swift podName:5cd837fc-3257-446b-a556-381501bf95da nodeName:}" failed. No retries permitted until 2026-01-29 12:32:31.387167665 +0000 UTC m=+1565.639249120 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/5cd837fc-3257-446b-a556-381501bf95da-etc-swift") pod "swift-proxy-8695cf79dc-6tgzq" (UID: "5cd837fc-3257-446b-a556-381501bf95da") : configmap "swift-ring-files" not found
Jan 29 12:32:24 crc kubenswrapper[4753]: I0129 12:32:24.907070 4753 generic.go:334] "Generic (PLEG): container finished" podID="5188a03d-cba0-4bbb-9c43-5f1ff77cf44d" containerID="23da8d2b8648a062ac86a655913b68baddecb5237d38120749304817382bdf11" exitCode=0
Jan 29 12:32:24 crc kubenswrapper[4753]: I0129 12:32:24.907130 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-ring-rebalance-rkchn" event={"ID":"5188a03d-cba0-4bbb-9c43-5f1ff77cf44d","Type":"ContainerDied","Data":"23da8d2b8648a062ac86a655913b68baddecb5237d38120749304817382bdf11"}
Jan 29 12:32:26 crc kubenswrapper[4753]: I0129 12:32:26.522162 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-rkchn"
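Across the retries above, durationBeforeRetry doubles each time the same mount fails: 500ms, 1s, 2s, 4s, 8s. A minimal Go sketch of such a doubling backoff; the cap is an assumption for the sketch, since the log itself never shows one:

    package main

    import (
    	"fmt"
    	"time"
    )

    // backoff reproduces the doubling visible in durationBeforeRetry:
    // 500ms, 1s, 2s, 4s, 8s, ... The cap is assumed, not taken from the log.
    type backoff struct {
    	next time.Duration
    	max  time.Duration
    }

    // delay returns the current wait and doubles the next one, up to max.
    func (b *backoff) delay() time.Duration {
    	d := b.next
    	b.next *= 2
    	if b.next > b.max {
    		b.next = b.max
    	}
    	return d
    }

    func main() {
    	b := &backoff{next: 500 * time.Millisecond, max: 2 * time.Minute}
    	for i := 0; i < 5; i++ {
    		fmt.Println("durationBeforeRetry", b.delay()) // 500ms 1s 2s 4s 8s
    	}
    }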
Jan 29 12:32:26 crc kubenswrapper[4753]: I0129 12:32:26.705036 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jn48d\" (UniqueName: \"kubernetes.io/projected/5188a03d-cba0-4bbb-9c43-5f1ff77cf44d-kube-api-access-jn48d\") pod \"5188a03d-cba0-4bbb-9c43-5f1ff77cf44d\" (UID: \"5188a03d-cba0-4bbb-9c43-5f1ff77cf44d\") "
Jan 29 12:32:26 crc kubenswrapper[4753]: I0129 12:32:26.705107 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/5188a03d-cba0-4bbb-9c43-5f1ff77cf44d-swiftconf\") pod \"5188a03d-cba0-4bbb-9c43-5f1ff77cf44d\" (UID: \"5188a03d-cba0-4bbb-9c43-5f1ff77cf44d\") "
Jan 29 12:32:26 crc kubenswrapper[4753]: I0129 12:32:26.705264 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/5188a03d-cba0-4bbb-9c43-5f1ff77cf44d-etc-swift\") pod \"5188a03d-cba0-4bbb-9c43-5f1ff77cf44d\" (UID: \"5188a03d-cba0-4bbb-9c43-5f1ff77cf44d\") "
Jan 29 12:32:26 crc kubenswrapper[4753]: I0129 12:32:26.705355 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/5188a03d-cba0-4bbb-9c43-5f1ff77cf44d-ring-data-devices\") pod \"5188a03d-cba0-4bbb-9c43-5f1ff77cf44d\" (UID: \"5188a03d-cba0-4bbb-9c43-5f1ff77cf44d\") "
Jan 29 12:32:26 crc kubenswrapper[4753]: I0129 12:32:26.705426 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5188a03d-cba0-4bbb-9c43-5f1ff77cf44d-scripts\") pod \"5188a03d-cba0-4bbb-9c43-5f1ff77cf44d\" (UID: \"5188a03d-cba0-4bbb-9c43-5f1ff77cf44d\") "
Jan 29 12:32:26 crc kubenswrapper[4753]: I0129 12:32:26.705515 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/5188a03d-cba0-4bbb-9c43-5f1ff77cf44d-dispersionconf\") pod \"5188a03d-cba0-4bbb-9c43-5f1ff77cf44d\" (UID: \"5188a03d-cba0-4bbb-9c43-5f1ff77cf44d\") "
Jan 29 12:32:26 crc kubenswrapper[4753]: I0129 12:32:26.706984 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5188a03d-cba0-4bbb-9c43-5f1ff77cf44d-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "5188a03d-cba0-4bbb-9c43-5f1ff77cf44d" (UID: "5188a03d-cba0-4bbb-9c43-5f1ff77cf44d"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 12:32:26 crc kubenswrapper[4753]: I0129 12:32:26.707111 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5188a03d-cba0-4bbb-9c43-5f1ff77cf44d-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "5188a03d-cba0-4bbb-9c43-5f1ff77cf44d" (UID: "5188a03d-cba0-4bbb-9c43-5f1ff77cf44d"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 12:32:26 crc kubenswrapper[4753]: I0129 12:32:26.716014 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5188a03d-cba0-4bbb-9c43-5f1ff77cf44d-kube-api-access-jn48d" (OuterVolumeSpecName: "kube-api-access-jn48d") pod "5188a03d-cba0-4bbb-9c43-5f1ff77cf44d" (UID: "5188a03d-cba0-4bbb-9c43-5f1ff77cf44d"). InnerVolumeSpecName "kube-api-access-jn48d". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 12:32:26 crc kubenswrapper[4753]: I0129 12:32:26.723921 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5188a03d-cba0-4bbb-9c43-5f1ff77cf44d-scripts" (OuterVolumeSpecName: "scripts") pod "5188a03d-cba0-4bbb-9c43-5f1ff77cf44d" (UID: "5188a03d-cba0-4bbb-9c43-5f1ff77cf44d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 12:32:26 crc kubenswrapper[4753]: E0129 12:32:26.728715 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5188a03d-cba0-4bbb-9c43-5f1ff77cf44d-dispersionconf podName:5188a03d-cba0-4bbb-9c43-5f1ff77cf44d nodeName:}" failed. No retries permitted until 2026-01-29 12:32:27.228664206 +0000 UTC m=+1561.480745661 (durationBeforeRetry 500ms). Error: error cleaning subPath mounts for volume "dispersionconf" (UniqueName: "kubernetes.io/secret/5188a03d-cba0-4bbb-9c43-5f1ff77cf44d-dispersionconf") pod "5188a03d-cba0-4bbb-9c43-5f1ff77cf44d" (UID: "5188a03d-cba0-4bbb-9c43-5f1ff77cf44d") : error deleting /var/lib/kubelet/pods/5188a03d-cba0-4bbb-9c43-5f1ff77cf44d/volume-subpaths: remove /var/lib/kubelet/pods/5188a03d-cba0-4bbb-9c43-5f1ff77cf44d/volume-subpaths: no such file or directory
Jan 29 12:32:26 crc kubenswrapper[4753]: I0129 12:32:26.734432 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5188a03d-cba0-4bbb-9c43-5f1ff77cf44d-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "5188a03d-cba0-4bbb-9c43-5f1ff77cf44d" (UID: "5188a03d-cba0-4bbb-9c43-5f1ff77cf44d"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 12:32:26 crc kubenswrapper[4753]: I0129 12:32:26.807414 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jn48d\" (UniqueName: \"kubernetes.io/projected/5188a03d-cba0-4bbb-9c43-5f1ff77cf44d-kube-api-access-jn48d\") on node \"crc\" DevicePath \"\""
Jan 29 12:32:26 crc kubenswrapper[4753]: I0129 12:32:26.807459 4753 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/5188a03d-cba0-4bbb-9c43-5f1ff77cf44d-swiftconf\") on node \"crc\" DevicePath \"\""
Jan 29 12:32:26 crc kubenswrapper[4753]: I0129 12:32:26.807469 4753 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/5188a03d-cba0-4bbb-9c43-5f1ff77cf44d-etc-swift\") on node \"crc\" DevicePath \"\""
Jan 29 12:32:26 crc kubenswrapper[4753]: I0129 12:32:26.807477 4753 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/5188a03d-cba0-4bbb-9c43-5f1ff77cf44d-ring-data-devices\") on node \"crc\" DevicePath \"\""
Jan 29 12:32:26 crc kubenswrapper[4753]: I0129 12:32:26.807486 4753 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5188a03d-cba0-4bbb-9c43-5f1ff77cf44d-scripts\") on node \"crc\" DevicePath \"\""
Jan 29 12:32:27 crc kubenswrapper[4753]: I0129 12:32:27.040148 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-ring-rebalance-rkchn" event={"ID":"5188a03d-cba0-4bbb-9c43-5f1ff77cf44d","Type":"ContainerDied","Data":"7a1f0f280a453a95e15e12c64bbbc76b3fbcabcfd5d83c4c4774b4e53c23950b"}
Jan 29 12:32:27 crc kubenswrapper[4753]: I0129 12:32:27.040189 4753 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7a1f0f280a453a95e15e12c64bbbc76b3fbcabcfd5d83c4c4774b4e53c23950b"
Jan 29 12:32:27 crc kubenswrapper[4753]: I0129 12:32:27.040242 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-rkchn"
Jan 29 12:32:27 crc kubenswrapper[4753]: I0129 12:32:27.326440 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/5188a03d-cba0-4bbb-9c43-5f1ff77cf44d-dispersionconf\") pod \"5188a03d-cba0-4bbb-9c43-5f1ff77cf44d\" (UID: \"5188a03d-cba0-4bbb-9c43-5f1ff77cf44d\") "
Jan 29 12:32:27 crc kubenswrapper[4753]: I0129 12:32:27.329670 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5188a03d-cba0-4bbb-9c43-5f1ff77cf44d-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "5188a03d-cba0-4bbb-9c43-5f1ff77cf44d" (UID: "5188a03d-cba0-4bbb-9c43-5f1ff77cf44d"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 12:32:27 crc kubenswrapper[4753]: I0129 12:32:27.427959 4753 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/5188a03d-cba0-4bbb-9c43-5f1ff77cf44d-dispersionconf\") on node \"crc\" DevicePath \"\""
Jan 29 12:32:29 crc kubenswrapper[4753]: I0129 12:32:29.252539 4753 patch_prober.go:28] interesting pod/machine-config-daemon-7c24x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 12:32:29 crc kubenswrapper[4753]: I0129 12:32:29.252916 4753 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 12:32:31 crc kubenswrapper[4753]: I0129 12:32:31.037627 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/73d3c43c-a487-4cbe-a709-833d3814f63a-etc-swift\") pod \"swift-storage-0\" (UID: \"73d3c43c-a487-4cbe-a709-833d3814f63a\") " pod="swift-kuttl-tests/swift-storage-0"
Jan 29 12:32:31 crc kubenswrapper[4753]: I0129 12:32:31.045063 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/73d3c43c-a487-4cbe-a709-833d3814f63a-etc-swift\") pod \"swift-storage-0\" (UID: \"73d3c43c-a487-4cbe-a709-833d3814f63a\") " pod="swift-kuttl-tests/swift-storage-0"
Jan 29 12:32:31 crc kubenswrapper[4753]: I0129 12:32:31.122729 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-storage-0"
Jan 29 12:32:31 crc kubenswrapper[4753]: I0129 12:32:31.460319 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/5cd837fc-3257-446b-a556-381501bf95da-etc-swift\") pod \"swift-proxy-8695cf79dc-6tgzq\" (UID: \"5cd837fc-3257-446b-a556-381501bf95da\") " pod="swift-kuttl-tests/swift-proxy-8695cf79dc-6tgzq"
Jan 29 12:32:31 crc kubenswrapper[4753]: I0129 12:32:31.469625 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/5cd837fc-3257-446b-a556-381501bf95da-etc-swift\") pod \"swift-proxy-8695cf79dc-6tgzq\" (UID: \"5cd837fc-3257-446b-a556-381501bf95da\") " pod="swift-kuttl-tests/swift-proxy-8695cf79dc-6tgzq"
Jan 29 12:32:31 crc kubenswrapper[4753]: I0129 12:32:31.633494 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-proxy-8695cf79dc-6tgzq"
Jan 29 12:32:31 crc kubenswrapper[4753]: I0129 12:32:31.690655 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-storage-0"]
Jan 29 12:32:32 crc kubenswrapper[4753]: I0129 12:32:32.119444 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"73d3c43c-a487-4cbe-a709-833d3814f63a","Type":"ContainerStarted","Data":"85901a21b03cb05ab63534ba59766d3e5db5f022fa4ef57a44a5763481c89795"}
Jan 29 12:32:32 crc kubenswrapper[4753]: I0129 12:32:32.119867 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"73d3c43c-a487-4cbe-a709-833d3814f63a","Type":"ContainerStarted","Data":"94deb8d5c82a323b720d1664655132f29900b81cee4cbdc08911cb7fe2de91ee"}
Jan 29 12:32:32 crc kubenswrapper[4753]: W0129 12:32:32.160599 4753 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5cd837fc_3257_446b_a556_381501bf95da.slice/crio-28d116828e4aa2e048748d4499badbee9bd9fd8324685813749109fc0f8f876b WatchSource:0}: Error finding container 28d116828e4aa2e048748d4499badbee9bd9fd8324685813749109fc0f8f876b: Status 404 returned error can't find the container with id 28d116828e4aa2e048748d4499badbee9bd9fd8324685813749109fc0f8f876b
Jan 29 12:32:32 crc kubenswrapper[4753]: I0129 12:32:32.161414 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-proxy-8695cf79dc-6tgzq"]
Jan 29 12:32:33 crc kubenswrapper[4753]: I0129 12:32:33.302376 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-8695cf79dc-6tgzq" event={"ID":"5cd837fc-3257-446b-a556-381501bf95da","Type":"ContainerStarted","Data":"8735cb8d474bbb7d151cf8c711d947d690f3b2519906d4a1d086c18431e4a215"}
Jan 29 12:32:33 crc kubenswrapper[4753]: I0129 12:32:33.302717 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-8695cf79dc-6tgzq" event={"ID":"5cd837fc-3257-446b-a556-381501bf95da","Type":"ContainerStarted","Data":"acedc6bfe58c089a8ea686fa0f17b0037e521119226c6fbf605d5814940ad4bd"}
Jan 29 12:32:33 crc kubenswrapper[4753]: I0129 12:32:33.302745 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-8695cf79dc-6tgzq" event={"ID":"5cd837fc-3257-446b-a556-381501bf95da","Type":"ContainerStarted","Data":"28d116828e4aa2e048748d4499badbee9bd9fd8324685813749109fc0f8f876b"}
Jan 29 12:32:33 crc kubenswrapper[4753]: I0129 12:32:33.302777 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="swift-kuttl-tests/swift-proxy-8695cf79dc-6tgzq"
Jan 29 12:32:33 crc kubenswrapper[4753]: I0129 12:32:33.302816 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="swift-kuttl-tests/swift-proxy-8695cf79dc-6tgzq"
Jan 29 12:32:33 crc kubenswrapper[4753]: I0129 12:32:33.363168 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"73d3c43c-a487-4cbe-a709-833d3814f63a","Type":"ContainerStarted","Data":"0c2306ddbfab7861c3d8c00e01664bfcee1600c5232e90de7e0b99398486c0f2"}
Jan 29 12:32:33 crc kubenswrapper[4753]: I0129 12:32:33.363622 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"73d3c43c-a487-4cbe-a709-833d3814f63a","Type":"ContainerStarted","Data":"c41912cf54b65b01bda7208bb5de90c51fa6e0b614418bc1b81e4d0f739ae254"}
Jan 29 12:32:33 crc kubenswrapper[4753]: I0129 12:32:33.363666 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"73d3c43c-a487-4cbe-a709-833d3814f63a","Type":"ContainerStarted","Data":"5954307440c49857c3884bc677c6c2c89aedf9990fc971b9c3c4d098f59608f3"}
Jan 29 12:32:33 crc kubenswrapper[4753]: I0129 12:32:33.363686 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"73d3c43c-a487-4cbe-a709-833d3814f63a","Type":"ContainerStarted","Data":"5306dcba72c8b09ae9b789f532b3b2832b52b3ee90a7a0241b4b3ab378c7447c"}
Jan 29 12:32:33 crc kubenswrapper[4753]: I0129 12:32:33.363696 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"73d3c43c-a487-4cbe-a709-833d3814f63a","Type":"ContainerStarted","Data":"e5ad87c4bb81c3f7473b77a68e3954fb7d2ea28653225d2b6391291fd359f6bb"}
Jan 29 12:32:34 crc kubenswrapper[4753]: I0129 12:32:34.379363 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"73d3c43c-a487-4cbe-a709-833d3814f63a","Type":"ContainerStarted","Data":"fa72c391721aa00dc31c6e7814fc8821cbc5f688af44d0e7b8dfb889bbd67035"}
Jan 29 12:32:34 crc kubenswrapper[4753]: I0129 12:32:34.379442 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"73d3c43c-a487-4cbe-a709-833d3814f63a","Type":"ContainerStarted","Data":"e44812da75dd26639443b2c1f81bd677b15708e2595e59965d98d3c832858ea0"}
Jan 29 12:32:34 crc kubenswrapper[4753]: I0129 12:32:34.379463 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"73d3c43c-a487-4cbe-a709-833d3814f63a","Type":"ContainerStarted","Data":"3e8c87fdb4f6b86452fbe3671f11b56d2d62c3247a473f41d0495bcef3fcdcc0"}
Jan 29 12:32:34 crc kubenswrapper[4753]: I0129 12:32:34.379484 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"73d3c43c-a487-4cbe-a709-833d3814f63a","Type":"ContainerStarted","Data":"3f6621ad2e58b4a13f2b99f35f270528fe0246f67d39c0365b9fbc030c6572e2"}
Jan 29 12:32:35 crc kubenswrapper[4753]: I0129 12:32:35.393195 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"73d3c43c-a487-4cbe-a709-833d3814f63a","Type":"ContainerStarted","Data":"52b28e88e824c6b15431ab55f4cc5b602d678bc924fa3aa5f0da872b74f9ace1"}
Jan 29 12:32:35 crc kubenswrapper[4753]: I0129 12:32:35.393271 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"73d3c43c-a487-4cbe-a709-833d3814f63a","Type":"ContainerStarted","Data":"188a863138cbf7458190657501c965dc65f9d80da0573bd2fc6dcb31f3433e43"}
Jan 29 12:32:36 crc kubenswrapper[4753]: I0129 12:32:36.409964 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"73d3c43c-a487-4cbe-a709-833d3814f63a","Type":"ContainerStarted","Data":"d2cf1d3b83ecb066e0bed30a672e7ba8aaa3c97e9260bb934ffb23a7dea23f19"}
Jan 29 12:32:36 crc kubenswrapper[4753]: I0129 12:32:36.410311 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"73d3c43c-a487-4cbe-a709-833d3814f63a","Type":"ContainerStarted","Data":"34e6277ec7168ded71495f4f3d65b9b73a1eaa282bdabbffa51a0a4c097682c7"}
Jan 29 12:32:36 crc kubenswrapper[4753]: I0129 12:32:36.410322 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"73d3c43c-a487-4cbe-a709-833d3814f63a","Type":"ContainerStarted","Data":"d3c3cd604542f57234212255910da9908efff1b32ba20df662a743db9bc924bf"}
Jan 29 12:32:36 crc kubenswrapper[4753]: I0129 12:32:36.458544 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="swift-kuttl-tests/swift-storage-0" podStartSLOduration=23.458517004 podStartE2EDuration="23.458517004s" podCreationTimestamp="2026-01-29 12:32:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:32:36.456321021 +0000 UTC m=+1570.708402476" watchObservedRunningTime="2026-01-29 12:32:36.458517004 +0000 UTC m=+1570.710598459"
Jan 29 12:32:36 crc kubenswrapper[4753]: I0129 12:32:36.464902 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="swift-kuttl-tests/swift-proxy-8695cf79dc-6tgzq" podStartSLOduration=21.464879717 podStartE2EDuration="21.464879717s" podCreationTimestamp="2026-01-29 12:32:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:32:33.342735183 +0000 UTC m=+1567.594816648" watchObservedRunningTime="2026-01-29 12:32:36.464879717 +0000 UTC m=+1570.716961172"
Jan 29 12:32:41 crc kubenswrapper[4753]: I0129 12:32:41.636964 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="swift-kuttl-tests/swift-proxy-8695cf79dc-6tgzq"
Jan 29 12:32:41 crc kubenswrapper[4753]: I0129 12:32:41.638374 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="swift-kuttl-tests/swift-proxy-8695cf79dc-6tgzq"
kubenswrapper[4753]: I0129 12:32:42.787442 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerName="object-auditor" containerID="cri-o://188a863138cbf7458190657501c965dc65f9d80da0573bd2fc6dcb31f3433e43" gracePeriod=30 Jan 29 12:32:42 crc kubenswrapper[4753]: I0129 12:32:42.787472 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerName="object-updater" containerID="cri-o://52b28e88e824c6b15431ab55f4cc5b602d678bc924fa3aa5f0da872b74f9ace1" gracePeriod=30 Jan 29 12:32:42 crc kubenswrapper[4753]: I0129 12:32:42.787520 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerName="rsync" containerID="cri-o://34e6277ec7168ded71495f4f3d65b9b73a1eaa282bdabbffa51a0a4c097682c7" gracePeriod=30 Jan 29 12:32:42 crc kubenswrapper[4753]: I0129 12:32:42.787563 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerName="object-replicator" containerID="cri-o://fa72c391721aa00dc31c6e7814fc8821cbc5f688af44d0e7b8dfb889bbd67035" gracePeriod=30 Jan 29 12:32:42 crc kubenswrapper[4753]: I0129 12:32:42.787592 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerName="container-updater" containerID="cri-o://3e8c87fdb4f6b86452fbe3671f11b56d2d62c3247a473f41d0495bcef3fcdcc0" gracePeriod=30 Jan 29 12:32:42 crc kubenswrapper[4753]: I0129 12:32:42.787436 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerName="swift-recon-cron" containerID="cri-o://d2cf1d3b83ecb066e0bed30a672e7ba8aaa3c97e9260bb934ffb23a7dea23f19" gracePeriod=30 Jan 29 12:32:42 crc kubenswrapper[4753]: I0129 12:32:42.787637 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerName="account-auditor" containerID="cri-o://5306dcba72c8b09ae9b789f532b3b2832b52b3ee90a7a0241b4b3ab378c7447c" gracePeriod=30 Jan 29 12:32:42 crc kubenswrapper[4753]: I0129 12:32:42.787648 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerName="account-replicator" containerID="cri-o://e5ad87c4bb81c3f7473b77a68e3954fb7d2ea28653225d2b6391291fd359f6bb" gracePeriod=30 Jan 29 12:32:42 crc kubenswrapper[4753]: I0129 12:32:42.787655 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerName="container-auditor" containerID="cri-o://3f6621ad2e58b4a13f2b99f35f270528fe0246f67d39c0365b9fbc030c6572e2" gracePeriod=30 Jan 29 12:32:42 crc kubenswrapper[4753]: I0129 12:32:42.787433 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerName="object-expirer" containerID="cri-o://d3c3cd604542f57234212255910da9908efff1b32ba20df662a743db9bc924bf" 
gracePeriod=30 Jan 29 12:32:42 crc kubenswrapper[4753]: I0129 12:32:42.787703 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerName="container-replicator" containerID="cri-o://0c2306ddbfab7861c3d8c00e01664bfcee1600c5232e90de7e0b99398486c0f2" gracePeriod=30 Jan 29 12:32:42 crc kubenswrapper[4753]: I0129 12:32:42.787687 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerName="account-reaper" containerID="cri-o://5954307440c49857c3884bc677c6c2c89aedf9990fc971b9c3c4d098f59608f3" gracePeriod=30 Jan 29 12:32:42 crc kubenswrapper[4753]: I0129 12:32:42.787771 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerName="object-server" containerID="cri-o://e44812da75dd26639443b2c1f81bd677b15708e2595e59965d98d3c832858ea0" gracePeriod=30 Jan 29 12:32:42 crc kubenswrapper[4753]: I0129 12:32:42.806639 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-rkchn"] Jan 29 12:32:42 crc kubenswrapper[4753]: I0129 12:32:42.815024 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-rkchn"] Jan 29 12:32:43 crc kubenswrapper[4753]: I0129 12:32:43.373507 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/swift-proxy-8695cf79dc-6tgzq"] Jan 29 12:32:43 crc kubenswrapper[4753]: I0129 12:32:43.497086 4753 generic.go:334] "Generic (PLEG): container finished" podID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerID="34e6277ec7168ded71495f4f3d65b9b73a1eaa282bdabbffa51a0a4c097682c7" exitCode=0 Jan 29 12:32:43 crc kubenswrapper[4753]: I0129 12:32:43.497119 4753 generic.go:334] "Generic (PLEG): container finished" podID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerID="d3c3cd604542f57234212255910da9908efff1b32ba20df662a743db9bc924bf" exitCode=0 Jan 29 12:32:43 crc kubenswrapper[4753]: I0129 12:32:43.497130 4753 generic.go:334] "Generic (PLEG): container finished" podID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerID="52b28e88e824c6b15431ab55f4cc5b602d678bc924fa3aa5f0da872b74f9ace1" exitCode=0 Jan 29 12:32:43 crc kubenswrapper[4753]: I0129 12:32:43.497139 4753 generic.go:334] "Generic (PLEG): container finished" podID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerID="188a863138cbf7458190657501c965dc65f9d80da0573bd2fc6dcb31f3433e43" exitCode=0 Jan 29 12:32:43 crc kubenswrapper[4753]: I0129 12:32:43.497148 4753 generic.go:334] "Generic (PLEG): container finished" podID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerID="fa72c391721aa00dc31c6e7814fc8821cbc5f688af44d0e7b8dfb889bbd67035" exitCode=0 Jan 29 12:32:43 crc kubenswrapper[4753]: I0129 12:32:43.497156 4753 generic.go:334] "Generic (PLEG): container finished" podID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerID="e44812da75dd26639443b2c1f81bd677b15708e2595e59965d98d3c832858ea0" exitCode=0 Jan 29 12:32:43 crc kubenswrapper[4753]: I0129 12:32:43.497149 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"73d3c43c-a487-4cbe-a709-833d3814f63a","Type":"ContainerDied","Data":"34e6277ec7168ded71495f4f3d65b9b73a1eaa282bdabbffa51a0a4c097682c7"} Jan 29 12:32:43 crc kubenswrapper[4753]: I0129 12:32:43.497200 4753 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"73d3c43c-a487-4cbe-a709-833d3814f63a","Type":"ContainerDied","Data":"d3c3cd604542f57234212255910da9908efff1b32ba20df662a743db9bc924bf"} Jan 29 12:32:43 crc kubenswrapper[4753]: I0129 12:32:43.497216 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"73d3c43c-a487-4cbe-a709-833d3814f63a","Type":"ContainerDied","Data":"52b28e88e824c6b15431ab55f4cc5b602d678bc924fa3aa5f0da872b74f9ace1"} Jan 29 12:32:43 crc kubenswrapper[4753]: I0129 12:32:43.497167 4753 generic.go:334] "Generic (PLEG): container finished" podID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerID="3e8c87fdb4f6b86452fbe3671f11b56d2d62c3247a473f41d0495bcef3fcdcc0" exitCode=0 Jan 29 12:32:43 crc kubenswrapper[4753]: I0129 12:32:43.497256 4753 generic.go:334] "Generic (PLEG): container finished" podID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerID="3f6621ad2e58b4a13f2b99f35f270528fe0246f67d39c0365b9fbc030c6572e2" exitCode=0 Jan 29 12:32:43 crc kubenswrapper[4753]: I0129 12:32:43.497267 4753 generic.go:334] "Generic (PLEG): container finished" podID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerID="0c2306ddbfab7861c3d8c00e01664bfcee1600c5232e90de7e0b99398486c0f2" exitCode=0 Jan 29 12:32:43 crc kubenswrapper[4753]: I0129 12:32:43.497275 4753 generic.go:334] "Generic (PLEG): container finished" podID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerID="c41912cf54b65b01bda7208bb5de90c51fa6e0b614418bc1b81e4d0f739ae254" exitCode=0 Jan 29 12:32:43 crc kubenswrapper[4753]: I0129 12:32:43.497285 4753 generic.go:334] "Generic (PLEG): container finished" podID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerID="5954307440c49857c3884bc677c6c2c89aedf9990fc971b9c3c4d098f59608f3" exitCode=0 Jan 29 12:32:43 crc kubenswrapper[4753]: I0129 12:32:43.497291 4753 generic.go:334] "Generic (PLEG): container finished" podID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerID="5306dcba72c8b09ae9b789f532b3b2832b52b3ee90a7a0241b4b3ab378c7447c" exitCode=0 Jan 29 12:32:43 crc kubenswrapper[4753]: I0129 12:32:43.497299 4753 generic.go:334] "Generic (PLEG): container finished" podID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerID="e5ad87c4bb81c3f7473b77a68e3954fb7d2ea28653225d2b6391291fd359f6bb" exitCode=0 Jan 29 12:32:43 crc kubenswrapper[4753]: I0129 12:32:43.497307 4753 generic.go:334] "Generic (PLEG): container finished" podID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerID="85901a21b03cb05ab63534ba59766d3e5db5f022fa4ef57a44a5763481c89795" exitCode=0 Jan 29 12:32:43 crc kubenswrapper[4753]: I0129 12:32:43.497244 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"73d3c43c-a487-4cbe-a709-833d3814f63a","Type":"ContainerDied","Data":"188a863138cbf7458190657501c965dc65f9d80da0573bd2fc6dcb31f3433e43"} Jan 29 12:32:43 crc kubenswrapper[4753]: I0129 12:32:43.497405 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"73d3c43c-a487-4cbe-a709-833d3814f63a","Type":"ContainerDied","Data":"fa72c391721aa00dc31c6e7814fc8821cbc5f688af44d0e7b8dfb889bbd67035"} Jan 29 12:32:43 crc kubenswrapper[4753]: I0129 12:32:43.497431 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"73d3c43c-a487-4cbe-a709-833d3814f63a","Type":"ContainerDied","Data":"e44812da75dd26639443b2c1f81bd677b15708e2595e59965d98d3c832858ea0"} Jan 29 12:32:43 crc kubenswrapper[4753]: I0129 12:32:43.497445 4753 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"73d3c43c-a487-4cbe-a709-833d3814f63a","Type":"ContainerDied","Data":"3e8c87fdb4f6b86452fbe3671f11b56d2d62c3247a473f41d0495bcef3fcdcc0"} Jan 29 12:32:43 crc kubenswrapper[4753]: I0129 12:32:43.497454 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"73d3c43c-a487-4cbe-a709-833d3814f63a","Type":"ContainerDied","Data":"3f6621ad2e58b4a13f2b99f35f270528fe0246f67d39c0365b9fbc030c6572e2"} Jan 29 12:32:43 crc kubenswrapper[4753]: I0129 12:32:43.497480 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"73d3c43c-a487-4cbe-a709-833d3814f63a","Type":"ContainerDied","Data":"0c2306ddbfab7861c3d8c00e01664bfcee1600c5232e90de7e0b99398486c0f2"} Jan 29 12:32:43 crc kubenswrapper[4753]: I0129 12:32:43.497490 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"73d3c43c-a487-4cbe-a709-833d3814f63a","Type":"ContainerDied","Data":"c41912cf54b65b01bda7208bb5de90c51fa6e0b614418bc1b81e4d0f739ae254"} Jan 29 12:32:43 crc kubenswrapper[4753]: I0129 12:32:43.497514 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-proxy-8695cf79dc-6tgzq" podUID="5cd837fc-3257-446b-a556-381501bf95da" containerName="proxy-httpd" containerID="cri-o://acedc6bfe58c089a8ea686fa0f17b0037e521119226c6fbf605d5814940ad4bd" gracePeriod=30 Jan 29 12:32:43 crc kubenswrapper[4753]: I0129 12:32:43.497583 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-proxy-8695cf79dc-6tgzq" podUID="5cd837fc-3257-446b-a556-381501bf95da" containerName="proxy-server" containerID="cri-o://8735cb8d474bbb7d151cf8c711d947d690f3b2519906d4a1d086c18431e4a215" gracePeriod=30 Jan 29 12:32:43 crc kubenswrapper[4753]: I0129 12:32:43.497522 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"73d3c43c-a487-4cbe-a709-833d3814f63a","Type":"ContainerDied","Data":"5954307440c49857c3884bc677c6c2c89aedf9990fc971b9c3c4d098f59608f3"} Jan 29 12:32:43 crc kubenswrapper[4753]: I0129 12:32:43.497677 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"73d3c43c-a487-4cbe-a709-833d3814f63a","Type":"ContainerDied","Data":"5306dcba72c8b09ae9b789f532b3b2832b52b3ee90a7a0241b4b3ab378c7447c"} Jan 29 12:32:43 crc kubenswrapper[4753]: I0129 12:32:43.497693 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"73d3c43c-a487-4cbe-a709-833d3814f63a","Type":"ContainerDied","Data":"e5ad87c4bb81c3f7473b77a68e3954fb7d2ea28653225d2b6391291fd359f6bb"} Jan 29 12:32:43 crc kubenswrapper[4753]: I0129 12:32:43.497705 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"73d3c43c-a487-4cbe-a709-833d3814f63a","Type":"ContainerDied","Data":"85901a21b03cb05ab63534ba59766d3e5db5f022fa4ef57a44a5763481c89795"} Jan 29 12:32:43 crc kubenswrapper[4753]: I0129 12:32:43.900589 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5188a03d-cba0-4bbb-9c43-5f1ff77cf44d" path="/var/lib/kubelet/pods/5188a03d-cba0-4bbb-9c43-5f1ff77cf44d/volumes" Jan 29 12:32:44 crc kubenswrapper[4753]: I0129 12:32:44.508113 4753 generic.go:334] "Generic (PLEG): container finished" podID="5cd837fc-3257-446b-a556-381501bf95da" 
containerID="8735cb8d474bbb7d151cf8c711d947d690f3b2519906d4a1d086c18431e4a215" exitCode=0 Jan 29 12:32:44 crc kubenswrapper[4753]: I0129 12:32:44.508162 4753 generic.go:334] "Generic (PLEG): container finished" podID="5cd837fc-3257-446b-a556-381501bf95da" containerID="acedc6bfe58c089a8ea686fa0f17b0037e521119226c6fbf605d5814940ad4bd" exitCode=0 Jan 29 12:32:44 crc kubenswrapper[4753]: I0129 12:32:44.508214 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-8695cf79dc-6tgzq" event={"ID":"5cd837fc-3257-446b-a556-381501bf95da","Type":"ContainerDied","Data":"8735cb8d474bbb7d151cf8c711d947d690f3b2519906d4a1d086c18431e4a215"} Jan 29 12:32:44 crc kubenswrapper[4753]: I0129 12:32:44.508299 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-8695cf79dc-6tgzq" event={"ID":"5cd837fc-3257-446b-a556-381501bf95da","Type":"ContainerDied","Data":"acedc6bfe58c089a8ea686fa0f17b0037e521119226c6fbf605d5814940ad4bd"} Jan 29 12:32:44 crc kubenswrapper[4753]: I0129 12:32:44.578588 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-proxy-8695cf79dc-6tgzq" Jan 29 12:32:44 crc kubenswrapper[4753]: I0129 12:32:44.757900 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5cd837fc-3257-446b-a556-381501bf95da-log-httpd\") pod \"5cd837fc-3257-446b-a556-381501bf95da\" (UID: \"5cd837fc-3257-446b-a556-381501bf95da\") " Jan 29 12:32:44 crc kubenswrapper[4753]: I0129 12:32:44.758068 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5cd837fc-3257-446b-a556-381501bf95da-config-data\") pod \"5cd837fc-3257-446b-a556-381501bf95da\" (UID: \"5cd837fc-3257-446b-a556-381501bf95da\") " Jan 29 12:32:44 crc kubenswrapper[4753]: I0129 12:32:44.758141 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5cd837fc-3257-446b-a556-381501bf95da-run-httpd\") pod \"5cd837fc-3257-446b-a556-381501bf95da\" (UID: \"5cd837fc-3257-446b-a556-381501bf95da\") " Jan 29 12:32:44 crc kubenswrapper[4753]: I0129 12:32:44.758186 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb8vv\" (UniqueName: \"kubernetes.io/projected/5cd837fc-3257-446b-a556-381501bf95da-kube-api-access-sb8vv\") pod \"5cd837fc-3257-446b-a556-381501bf95da\" (UID: \"5cd837fc-3257-446b-a556-381501bf95da\") " Jan 29 12:32:44 crc kubenswrapper[4753]: I0129 12:32:44.758283 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/5cd837fc-3257-446b-a556-381501bf95da-etc-swift\") pod \"5cd837fc-3257-446b-a556-381501bf95da\" (UID: \"5cd837fc-3257-446b-a556-381501bf95da\") " Jan 29 12:32:44 crc kubenswrapper[4753]: I0129 12:32:44.758506 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5cd837fc-3257-446b-a556-381501bf95da-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "5cd837fc-3257-446b-a556-381501bf95da" (UID: "5cd837fc-3257-446b-a556-381501bf95da"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:32:44 crc kubenswrapper[4753]: I0129 12:32:44.758605 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5cd837fc-3257-446b-a556-381501bf95da-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "5cd837fc-3257-446b-a556-381501bf95da" (UID: "5cd837fc-3257-446b-a556-381501bf95da"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:32:44 crc kubenswrapper[4753]: I0129 12:32:44.758695 4753 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5cd837fc-3257-446b-a556-381501bf95da-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 12:32:44 crc kubenswrapper[4753]: I0129 12:32:44.758713 4753 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5cd837fc-3257-446b-a556-381501bf95da-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 12:32:44 crc kubenswrapper[4753]: I0129 12:32:44.763901 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5cd837fc-3257-446b-a556-381501bf95da-kube-api-access-sb8vv" (OuterVolumeSpecName: "kube-api-access-sb8vv") pod "5cd837fc-3257-446b-a556-381501bf95da" (UID: "5cd837fc-3257-446b-a556-381501bf95da"). InnerVolumeSpecName "kube-api-access-sb8vv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:32:44 crc kubenswrapper[4753]: I0129 12:32:44.764471 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5cd837fc-3257-446b-a556-381501bf95da-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "5cd837fc-3257-446b-a556-381501bf95da" (UID: "5cd837fc-3257-446b-a556-381501bf95da"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:32:44 crc kubenswrapper[4753]: I0129 12:32:44.793634 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5cd837fc-3257-446b-a556-381501bf95da-config-data" (OuterVolumeSpecName: "config-data") pod "5cd837fc-3257-446b-a556-381501bf95da" (UID: "5cd837fc-3257-446b-a556-381501bf95da"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:32:44 crc kubenswrapper[4753]: I0129 12:32:44.860267 4753 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5cd837fc-3257-446b-a556-381501bf95da-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 12:32:44 crc kubenswrapper[4753]: I0129 12:32:44.860315 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb8vv\" (UniqueName: \"kubernetes.io/projected/5cd837fc-3257-446b-a556-381501bf95da-kube-api-access-sb8vv\") on node \"crc\" DevicePath \"\"" Jan 29 12:32:44 crc kubenswrapper[4753]: I0129 12:32:44.860325 4753 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/5cd837fc-3257-446b-a556-381501bf95da-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 29 12:32:45 crc kubenswrapper[4753]: I0129 12:32:45.518801 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-8695cf79dc-6tgzq" event={"ID":"5cd837fc-3257-446b-a556-381501bf95da","Type":"ContainerDied","Data":"28d116828e4aa2e048748d4499badbee9bd9fd8324685813749109fc0f8f876b"} Jan 29 12:32:45 crc kubenswrapper[4753]: I0129 12:32:45.518867 4753 scope.go:117] "RemoveContainer" containerID="8735cb8d474bbb7d151cf8c711d947d690f3b2519906d4a1d086c18431e4a215" Jan 29 12:32:45 crc kubenswrapper[4753]: I0129 12:32:45.518893 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-proxy-8695cf79dc-6tgzq" Jan 29 12:32:45 crc kubenswrapper[4753]: I0129 12:32:45.554475 4753 scope.go:117] "RemoveContainer" containerID="acedc6bfe58c089a8ea686fa0f17b0037e521119226c6fbf605d5814940ad4bd" Jan 29 12:32:45 crc kubenswrapper[4753]: I0129 12:32:45.560153 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/swift-proxy-8695cf79dc-6tgzq"] Jan 29 12:32:45 crc kubenswrapper[4753]: I0129 12:32:45.567651 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/swift-proxy-8695cf79dc-6tgzq"] Jan 29 12:32:45 crc kubenswrapper[4753]: I0129 12:32:45.897731 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5cd837fc-3257-446b-a556-381501bf95da" path="/var/lib/kubelet/pods/5cd837fc-3257-446b-a556-381501bf95da/volumes" Jan 29 12:32:59 crc kubenswrapper[4753]: I0129 12:32:59.252840 4753 patch_prober.go:28] interesting pod/machine-config-daemon-7c24x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 12:32:59 crc kubenswrapper[4753]: I0129 12:32:59.253609 4753 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 12:32:59 crc kubenswrapper[4753]: I0129 12:32:59.253736 4753 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" Jan 29 12:32:59 crc kubenswrapper[4753]: I0129 12:32:59.254546 4753 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"00211d2562b6632acb04decda9045cbb8d3216adca69cd5bc1380ec42b8211e0"} 
pod="openshift-machine-config-operator/machine-config-daemon-7c24x" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 12:32:59 crc kubenswrapper[4753]: I0129 12:32:59.254645 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" containerName="machine-config-daemon" containerID="cri-o://00211d2562b6632acb04decda9045cbb8d3216adca69cd5bc1380ec42b8211e0" gracePeriod=600 Jan 29 12:32:59 crc kubenswrapper[4753]: E0129 12:32:59.373598 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7c24x_openshift-machine-config-operator(b0310995-a7c7-47c3-ae6c-05daaaba92a6)\"" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" Jan 29 12:32:59 crc kubenswrapper[4753]: I0129 12:32:59.858583 4753 generic.go:334] "Generic (PLEG): container finished" podID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" containerID="00211d2562b6632acb04decda9045cbb8d3216adca69cd5bc1380ec42b8211e0" exitCode=0 Jan 29 12:32:59 crc kubenswrapper[4753]: I0129 12:32:59.858649 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" event={"ID":"b0310995-a7c7-47c3-ae6c-05daaaba92a6","Type":"ContainerDied","Data":"00211d2562b6632acb04decda9045cbb8d3216adca69cd5bc1380ec42b8211e0"} Jan 29 12:32:59 crc kubenswrapper[4753]: I0129 12:32:59.858742 4753 scope.go:117] "RemoveContainer" containerID="46e4d0e61ffa31047d03b8be433c21bd5af5bab8ccbf094d8375450be774834e" Jan 29 12:32:59 crc kubenswrapper[4753]: I0129 12:32:59.859407 4753 scope.go:117] "RemoveContainer" containerID="00211d2562b6632acb04decda9045cbb8d3216adca69cd5bc1380ec42b8211e0" Jan 29 12:32:59 crc kubenswrapper[4753]: E0129 12:32:59.859731 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7c24x_openshift-machine-config-operator(b0310995-a7c7-47c3-ae6c-05daaaba92a6)\"" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" Jan 29 12:33:12 crc kubenswrapper[4753]: I0129 12:33:12.888642 4753 scope.go:117] "RemoveContainer" containerID="00211d2562b6632acb04decda9045cbb8d3216adca69cd5bc1380ec42b8211e0" Jan 29 12:33:12 crc kubenswrapper[4753]: E0129 12:33:12.889325 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7c24x_openshift-machine-config-operator(b0310995-a7c7-47c3-ae6c-05daaaba92a6)\"" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" Jan 29 12:33:13 crc kubenswrapper[4753]: I0129 12:33:13.191350 4753 generic.go:334] "Generic (PLEG): container finished" podID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerID="d2cf1d3b83ecb066e0bed30a672e7ba8aaa3c97e9260bb934ffb23a7dea23f19" exitCode=137 Jan 29 12:33:13 crc kubenswrapper[4753]: I0129 12:33:13.191406 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="swift-kuttl-tests/swift-storage-0" event={"ID":"73d3c43c-a487-4cbe-a709-833d3814f63a","Type":"ContainerDied","Data":"d2cf1d3b83ecb066e0bed30a672e7ba8aaa3c97e9260bb934ffb23a7dea23f19"} Jan 29 12:33:13 crc kubenswrapper[4753]: I0129 12:33:13.323694 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:33:13 crc kubenswrapper[4753]: I0129 12:33:13.374573 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swift\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"73d3c43c-a487-4cbe-a709-833d3814f63a\" (UID: \"73d3c43c-a487-4cbe-a709-833d3814f63a\") " Jan 29 12:33:13 crc kubenswrapper[4753]: I0129 12:33:13.374628 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rhngw\" (UniqueName: \"kubernetes.io/projected/73d3c43c-a487-4cbe-a709-833d3814f63a-kube-api-access-rhngw\") pod \"73d3c43c-a487-4cbe-a709-833d3814f63a\" (UID: \"73d3c43c-a487-4cbe-a709-833d3814f63a\") " Jan 29 12:33:13 crc kubenswrapper[4753]: I0129 12:33:13.383280 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "swift") pod "73d3c43c-a487-4cbe-a709-833d3814f63a" (UID: "73d3c43c-a487-4cbe-a709-833d3814f63a"). InnerVolumeSpecName "local-storage04-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 29 12:33:13 crc kubenswrapper[4753]: I0129 12:33:13.385507 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/73d3c43c-a487-4cbe-a709-833d3814f63a-kube-api-access-rhngw" (OuterVolumeSpecName: "kube-api-access-rhngw") pod "73d3c43c-a487-4cbe-a709-833d3814f63a" (UID: "73d3c43c-a487-4cbe-a709-833d3814f63a"). InnerVolumeSpecName "kube-api-access-rhngw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:33:13 crc kubenswrapper[4753]: I0129 12:33:13.528588 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/73d3c43c-a487-4cbe-a709-833d3814f63a-cache\") pod \"73d3c43c-a487-4cbe-a709-833d3814f63a\" (UID: \"73d3c43c-a487-4cbe-a709-833d3814f63a\") " Jan 29 12:33:13 crc kubenswrapper[4753]: I0129 12:33:13.528636 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/73d3c43c-a487-4cbe-a709-833d3814f63a-etc-swift\") pod \"73d3c43c-a487-4cbe-a709-833d3814f63a\" (UID: \"73d3c43c-a487-4cbe-a709-833d3814f63a\") " Jan 29 12:33:13 crc kubenswrapper[4753]: I0129 12:33:13.528706 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/73d3c43c-a487-4cbe-a709-833d3814f63a-lock\") pod \"73d3c43c-a487-4cbe-a709-833d3814f63a\" (UID: \"73d3c43c-a487-4cbe-a709-833d3814f63a\") " Jan 29 12:33:13 crc kubenswrapper[4753]: I0129 12:33:13.528992 4753 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" " Jan 29 12:33:13 crc kubenswrapper[4753]: I0129 12:33:13.529015 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rhngw\" (UniqueName: \"kubernetes.io/projected/73d3c43c-a487-4cbe-a709-833d3814f63a-kube-api-access-rhngw\") on node \"crc\" DevicePath \"\"" Jan 29 12:33:13 crc kubenswrapper[4753]: I0129 12:33:13.529929 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/73d3c43c-a487-4cbe-a709-833d3814f63a-cache" (OuterVolumeSpecName: "cache") pod "73d3c43c-a487-4cbe-a709-833d3814f63a" (UID: "73d3c43c-a487-4cbe-a709-833d3814f63a"). InnerVolumeSpecName "cache". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:33:13 crc kubenswrapper[4753]: I0129 12:33:13.530184 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/73d3c43c-a487-4cbe-a709-833d3814f63a-lock" (OuterVolumeSpecName: "lock") pod "73d3c43c-a487-4cbe-a709-833d3814f63a" (UID: "73d3c43c-a487-4cbe-a709-833d3814f63a"). InnerVolumeSpecName "lock". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:33:13 crc kubenswrapper[4753]: I0129 12:33:13.539023 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/73d3c43c-a487-4cbe-a709-833d3814f63a-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "73d3c43c-a487-4cbe-a709-833d3814f63a" (UID: "73d3c43c-a487-4cbe-a709-833d3814f63a"). InnerVolumeSpecName "etc-swift". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:33:13 crc kubenswrapper[4753]: I0129 12:33:13.544239 4753 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc" Jan 29 12:33:13 crc kubenswrapper[4753]: I0129 12:33:13.630808 4753 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\"" Jan 29 12:33:13 crc kubenswrapper[4753]: I0129 12:33:13.630886 4753 reconciler_common.go:293] "Volume detached for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/73d3c43c-a487-4cbe-a709-833d3814f63a-cache\") on node \"crc\" DevicePath \"\"" Jan 29 12:33:13 crc kubenswrapper[4753]: I0129 12:33:13.630901 4753 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/73d3c43c-a487-4cbe-a709-833d3814f63a-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 29 12:33:13 crc kubenswrapper[4753]: I0129 12:33:13.630912 4753 reconciler_common.go:293] "Volume detached for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/73d3c43c-a487-4cbe-a709-833d3814f63a-lock\") on node \"crc\" DevicePath \"\"" Jan 29 12:33:14 crc kubenswrapper[4753]: I0129 12:33:14.209440 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"73d3c43c-a487-4cbe-a709-833d3814f63a","Type":"ContainerDied","Data":"94deb8d5c82a323b720d1664655132f29900b81cee4cbdc08911cb7fe2de91ee"} Jan 29 12:33:14 crc kubenswrapper[4753]: I0129 12:33:14.209581 4753 scope.go:117] "RemoveContainer" containerID="d2cf1d3b83ecb066e0bed30a672e7ba8aaa3c97e9260bb934ffb23a7dea23f19" Jan 29 12:33:14 crc kubenswrapper[4753]: I0129 12:33:14.210027 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:33:14 crc kubenswrapper[4753]: I0129 12:33:14.247312 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/swift-storage-0"] Jan 29 12:33:14 crc kubenswrapper[4753]: I0129 12:33:14.251699 4753 scope.go:117] "RemoveContainer" containerID="34e6277ec7168ded71495f4f3d65b9b73a1eaa282bdabbffa51a0a4c097682c7" Jan 29 12:33:14 crc kubenswrapper[4753]: I0129 12:33:14.252605 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/swift-storage-0"] Jan 29 12:33:14 crc kubenswrapper[4753]: I0129 12:33:14.274112 4753 scope.go:117] "RemoveContainer" containerID="d3c3cd604542f57234212255910da9908efff1b32ba20df662a743db9bc924bf" Jan 29 12:33:14 crc kubenswrapper[4753]: I0129 12:33:14.290539 4753 scope.go:117] "RemoveContainer" containerID="52b28e88e824c6b15431ab55f4cc5b602d678bc924fa3aa5f0da872b74f9ace1" Jan 29 12:33:14 crc kubenswrapper[4753]: I0129 12:33:14.309372 4753 scope.go:117] "RemoveContainer" containerID="188a863138cbf7458190657501c965dc65f9d80da0573bd2fc6dcb31f3433e43" Jan 29 12:33:14 crc kubenswrapper[4753]: I0129 12:33:14.328304 4753 scope.go:117] "RemoveContainer" containerID="fa72c391721aa00dc31c6e7814fc8821cbc5f688af44d0e7b8dfb889bbd67035" Jan 29 12:33:14 crc kubenswrapper[4753]: I0129 12:33:14.348590 4753 scope.go:117] "RemoveContainer" containerID="e44812da75dd26639443b2c1f81bd677b15708e2595e59965d98d3c832858ea0" Jan 29 12:33:14 crc kubenswrapper[4753]: I0129 12:33:14.366077 4753 scope.go:117] "RemoveContainer" containerID="3e8c87fdb4f6b86452fbe3671f11b56d2d62c3247a473f41d0495bcef3fcdcc0" Jan 29 12:33:14 crc kubenswrapper[4753]: I0129 12:33:14.395350 4753 scope.go:117] "RemoveContainer" containerID="3f6621ad2e58b4a13f2b99f35f270528fe0246f67d39c0365b9fbc030c6572e2" Jan 29 12:33:14 crc kubenswrapper[4753]: I0129 12:33:14.420384 4753 scope.go:117] "RemoveContainer" containerID="0c2306ddbfab7861c3d8c00e01664bfcee1600c5232e90de7e0b99398486c0f2" Jan 29 12:33:14 crc kubenswrapper[4753]: I0129 12:33:14.439038 4753 scope.go:117] "RemoveContainer" containerID="c41912cf54b65b01bda7208bb5de90c51fa6e0b614418bc1b81e4d0f739ae254" Jan 29 12:33:14 crc kubenswrapper[4753]: I0129 12:33:14.458025 4753 scope.go:117] "RemoveContainer" containerID="5954307440c49857c3884bc677c6c2c89aedf9990fc971b9c3c4d098f59608f3" Jan 29 12:33:14 crc kubenswrapper[4753]: I0129 12:33:14.478027 4753 scope.go:117] "RemoveContainer" containerID="5306dcba72c8b09ae9b789f532b3b2832b52b3ee90a7a0241b4b3ab378c7447c" Jan 29 12:33:14 crc kubenswrapper[4753]: I0129 12:33:14.496048 4753 scope.go:117] "RemoveContainer" containerID="e5ad87c4bb81c3f7473b77a68e3954fb7d2ea28653225d2b6391291fd359f6bb" Jan 29 12:33:14 crc kubenswrapper[4753]: I0129 12:33:14.523997 4753 scope.go:117] "RemoveContainer" containerID="85901a21b03cb05ab63534ba59766d3e5db5f022fa4ef57a44a5763481c89795" Jan 29 12:33:15 crc kubenswrapper[4753]: I0129 12:33:15.898828 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" path="/var/lib/kubelet/pods/73d3c43c-a487-4cbe-a709-833d3814f63a/volumes" Jan 29 12:33:16 crc kubenswrapper[4753]: I0129 12:33:16.606695 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/swift-storage-0"] Jan 29 12:33:16 crc kubenswrapper[4753]: E0129 12:33:16.607209 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerName="object-expirer" Jan 29 12:33:16 crc kubenswrapper[4753]: I0129 
12:33:16.607266 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerName="object-expirer" Jan 29 12:33:16 crc kubenswrapper[4753]: E0129 12:33:16.607286 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerName="container-server" Jan 29 12:33:16 crc kubenswrapper[4753]: I0129 12:33:16.607292 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerName="container-server" Jan 29 12:33:16 crc kubenswrapper[4753]: E0129 12:33:16.607304 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5188a03d-cba0-4bbb-9c43-5f1ff77cf44d" containerName="swift-ring-rebalance" Jan 29 12:33:16 crc kubenswrapper[4753]: I0129 12:33:16.607310 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="5188a03d-cba0-4bbb-9c43-5f1ff77cf44d" containerName="swift-ring-rebalance" Jan 29 12:33:16 crc kubenswrapper[4753]: E0129 12:33:16.607324 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerName="object-updater" Jan 29 12:33:16 crc kubenswrapper[4753]: I0129 12:33:16.607332 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerName="object-updater" Jan 29 12:33:16 crc kubenswrapper[4753]: E0129 12:33:16.607343 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerName="object-auditor" Jan 29 12:33:16 crc kubenswrapper[4753]: I0129 12:33:16.607351 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerName="object-auditor" Jan 29 12:33:16 crc kubenswrapper[4753]: E0129 12:33:16.607377 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerName="object-replicator" Jan 29 12:33:16 crc kubenswrapper[4753]: I0129 12:33:16.607385 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerName="object-replicator" Jan 29 12:33:16 crc kubenswrapper[4753]: E0129 12:33:16.607396 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerName="account-server" Jan 29 12:33:16 crc kubenswrapper[4753]: I0129 12:33:16.607403 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerName="account-server" Jan 29 12:33:16 crc kubenswrapper[4753]: E0129 12:33:16.607412 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerName="swift-recon-cron" Jan 29 12:33:16 crc kubenswrapper[4753]: I0129 12:33:16.607419 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerName="swift-recon-cron" Jan 29 12:33:16 crc kubenswrapper[4753]: E0129 12:33:16.607432 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerName="account-replicator" Jan 29 12:33:16 crc kubenswrapper[4753]: I0129 12:33:16.607440 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerName="account-replicator" Jan 29 12:33:16 crc kubenswrapper[4753]: E0129 12:33:16.607453 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerName="container-updater" Jan 29 
12:33:16 crc kubenswrapper[4753]: I0129 12:33:16.607460 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerName="container-updater" Jan 29 12:33:16 crc kubenswrapper[4753]: E0129 12:33:16.607474 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerName="container-replicator" Jan 29 12:33:16 crc kubenswrapper[4753]: I0129 12:33:16.607481 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerName="container-replicator" Jan 29 12:33:16 crc kubenswrapper[4753]: E0129 12:33:16.607492 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerName="container-auditor" Jan 29 12:33:16 crc kubenswrapper[4753]: I0129 12:33:16.607499 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerName="container-auditor" Jan 29 12:33:16 crc kubenswrapper[4753]: E0129 12:33:16.607512 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5cd837fc-3257-446b-a556-381501bf95da" containerName="proxy-httpd" Jan 29 12:33:16 crc kubenswrapper[4753]: I0129 12:33:16.607519 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="5cd837fc-3257-446b-a556-381501bf95da" containerName="proxy-httpd" Jan 29 12:33:16 crc kubenswrapper[4753]: E0129 12:33:16.607529 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerName="account-auditor" Jan 29 12:33:16 crc kubenswrapper[4753]: I0129 12:33:16.607536 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerName="account-auditor" Jan 29 12:33:16 crc kubenswrapper[4753]: E0129 12:33:16.607548 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerName="account-reaper" Jan 29 12:33:16 crc kubenswrapper[4753]: I0129 12:33:16.607558 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerName="account-reaper" Jan 29 12:33:16 crc kubenswrapper[4753]: E0129 12:33:16.607572 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerName="object-server" Jan 29 12:33:16 crc kubenswrapper[4753]: I0129 12:33:16.607603 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerName="object-server" Jan 29 12:33:16 crc kubenswrapper[4753]: E0129 12:33:16.607614 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5cd837fc-3257-446b-a556-381501bf95da" containerName="proxy-server" Jan 29 12:33:16 crc kubenswrapper[4753]: I0129 12:33:16.607623 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="5cd837fc-3257-446b-a556-381501bf95da" containerName="proxy-server" Jan 29 12:33:16 crc kubenswrapper[4753]: E0129 12:33:16.607644 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerName="rsync" Jan 29 12:33:16 crc kubenswrapper[4753]: I0129 12:33:16.607653 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerName="rsync" Jan 29 12:33:16 crc kubenswrapper[4753]: I0129 12:33:16.607894 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="5188a03d-cba0-4bbb-9c43-5f1ff77cf44d" containerName="swift-ring-rebalance" Jan 29 
12:33:16 crc kubenswrapper[4753]: I0129 12:33:16.607922 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerName="object-replicator" Jan 29 12:33:16 crc kubenswrapper[4753]: I0129 12:33:16.607936 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerName="object-updater" Jan 29 12:33:16 crc kubenswrapper[4753]: I0129 12:33:16.607951 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerName="account-reaper" Jan 29 12:33:16 crc kubenswrapper[4753]: I0129 12:33:16.607960 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerName="account-replicator" Jan 29 12:33:16 crc kubenswrapper[4753]: I0129 12:33:16.607970 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="5cd837fc-3257-446b-a556-381501bf95da" containerName="proxy-server" Jan 29 12:33:16 crc kubenswrapper[4753]: I0129 12:33:16.607977 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerName="container-updater" Jan 29 12:33:16 crc kubenswrapper[4753]: I0129 12:33:16.607987 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerName="object-expirer" Jan 29 12:33:16 crc kubenswrapper[4753]: I0129 12:33:16.608002 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerName="rsync" Jan 29 12:33:16 crc kubenswrapper[4753]: I0129 12:33:16.608015 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerName="object-auditor" Jan 29 12:33:16 crc kubenswrapper[4753]: I0129 12:33:16.608024 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerName="container-server" Jan 29 12:33:16 crc kubenswrapper[4753]: I0129 12:33:16.608036 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerName="container-replicator" Jan 29 12:33:16 crc kubenswrapper[4753]: I0129 12:33:16.608047 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerName="account-auditor" Jan 29 12:33:16 crc kubenswrapper[4753]: I0129 12:33:16.608055 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerName="object-server" Jan 29 12:33:16 crc kubenswrapper[4753]: I0129 12:33:16.608066 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerName="container-auditor" Jan 29 12:33:16 crc kubenswrapper[4753]: I0129 12:33:16.608076 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerName="account-server" Jan 29 12:33:16 crc kubenswrapper[4753]: I0129 12:33:16.608087 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="73d3c43c-a487-4cbe-a709-833d3814f63a" containerName="swift-recon-cron" Jan 29 12:33:16 crc kubenswrapper[4753]: I0129 12:33:16.608099 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="5cd837fc-3257-446b-a556-381501bf95da" containerName="proxy-httpd" Jan 29 12:33:16 crc kubenswrapper[4753]: I0129 12:33:16.613096 4753 util.go:30] "No sandbox for pod can be found. 
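
The cpu_manager/memory_manager "RemoveStaleState" storm above is admission-time housekeeping for the replacement swift-storage-0: per-container CPUSet and memory assignments are keyed by pod UID, so entries left by the deleted pods (73d3c43c..., 5cd837fc..., 5188a03d...) are purged before the new pod is assigned resources. A sketch of the idea (assumed data shape, not kubelet source):

    package main

    import "fmt"

    type key struct{ podUID, container string }

    // removeStaleState drops assignments for pods the kubelet no longer
    // tracks, so a new pod can never inherit a dead pod's CPUSet state.
    func removeStaleState(assignments map[key]string, activePods map[string]bool) {
        for k := range assignments {
            if !activePods[k.podUID] {
                fmt.Printf("Deleted CPUSet assignment podUID=%q containerName=%q\n",
                    k.podUID, k.container)
                delete(assignments, k)
            }
        }
    }

    func main() {
        state := map[key]string{
            {podUID: "73d3c43c-a487-4cbe-a709-833d3814f63a", container: "object-server"}: "0-3",
        }
        removeStaleState(state, map[string]bool{
            "440fbaaa-a825-4b7c-b5f5-6cad02f05493": true, // the new swift-storage-0
        })
    }
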
Need to start a new one" pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:33:16 crc kubenswrapper[4753]: I0129 12:33:16.616127 4753 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"swift-swift-dockercfg-8mbh8" Jan 29 12:33:16 crc kubenswrapper[4753]: I0129 12:33:16.616462 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"swift-kuttl-tests"/"swift-storage-config-data" Jan 29 12:33:16 crc kubenswrapper[4753]: I0129 12:33:16.616634 4753 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"combined-ca-bundle" Jan 29 12:33:16 crc kubenswrapper[4753]: I0129 12:33:16.617892 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"swift-kuttl-tests"/"swift-ring-files" Jan 29 12:33:16 crc kubenswrapper[4753]: I0129 12:33:16.617947 4753 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"swift-conf" Jan 29 12:33:16 crc kubenswrapper[4753]: I0129 12:33:16.700843 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-storage-0"] Jan 29 12:33:16 crc kubenswrapper[4753]: I0129 12:33:16.808665 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/440fbaaa-a825-4b7c-b5f5-6cad02f05493-lock\") pod \"swift-storage-0\" (UID: \"440fbaaa-a825-4b7c-b5f5-6cad02f05493\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:33:16 crc kubenswrapper[4753]: I0129 12:33:16.808768 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/440fbaaa-a825-4b7c-b5f5-6cad02f05493-etc-swift\") pod \"swift-storage-0\" (UID: \"440fbaaa-a825-4b7c-b5f5-6cad02f05493\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:33:16 crc kubenswrapper[4753]: I0129 12:33:16.808897 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/440fbaaa-a825-4b7c-b5f5-6cad02f05493-cache\") pod \"swift-storage-0\" (UID: \"440fbaaa-a825-4b7c-b5f5-6cad02f05493\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:33:16 crc kubenswrapper[4753]: I0129 12:33:16.808931 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2wrvw\" (UniqueName: \"kubernetes.io/projected/440fbaaa-a825-4b7c-b5f5-6cad02f05493-kube-api-access-2wrvw\") pod \"swift-storage-0\" (UID: \"440fbaaa-a825-4b7c-b5f5-6cad02f05493\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:33:16 crc kubenswrapper[4753]: I0129 12:33:16.808968 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"swift-storage-0\" (UID: \"440fbaaa-a825-4b7c-b5f5-6cad02f05493\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:33:16 crc kubenswrapper[4753]: I0129 12:33:16.808997 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/440fbaaa-a825-4b7c-b5f5-6cad02f05493-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"440fbaaa-a825-4b7c-b5f5-6cad02f05493\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:33:17 crc kubenswrapper[4753]: I0129 12:33:17.008356 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: 
\"kubernetes.io/empty-dir/440fbaaa-a825-4b7c-b5f5-6cad02f05493-cache\") pod \"swift-storage-0\" (UID: \"440fbaaa-a825-4b7c-b5f5-6cad02f05493\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:33:17 crc kubenswrapper[4753]: I0129 12:33:17.008948 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/440fbaaa-a825-4b7c-b5f5-6cad02f05493-cache\") pod \"swift-storage-0\" (UID: \"440fbaaa-a825-4b7c-b5f5-6cad02f05493\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:33:17 crc kubenswrapper[4753]: I0129 12:33:17.009079 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2wrvw\" (UniqueName: \"kubernetes.io/projected/440fbaaa-a825-4b7c-b5f5-6cad02f05493-kube-api-access-2wrvw\") pod \"swift-storage-0\" (UID: \"440fbaaa-a825-4b7c-b5f5-6cad02f05493\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:33:17 crc kubenswrapper[4753]: I0129 12:33:17.009721 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"swift-storage-0\" (UID: \"440fbaaa-a825-4b7c-b5f5-6cad02f05493\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:33:17 crc kubenswrapper[4753]: I0129 12:33:17.009779 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/440fbaaa-a825-4b7c-b5f5-6cad02f05493-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"440fbaaa-a825-4b7c-b5f5-6cad02f05493\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:33:17 crc kubenswrapper[4753]: I0129 12:33:17.009917 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/440fbaaa-a825-4b7c-b5f5-6cad02f05493-lock\") pod \"swift-storage-0\" (UID: \"440fbaaa-a825-4b7c-b5f5-6cad02f05493\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:33:17 crc kubenswrapper[4753]: I0129 12:33:17.009962 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/440fbaaa-a825-4b7c-b5f5-6cad02f05493-etc-swift\") pod \"swift-storage-0\" (UID: \"440fbaaa-a825-4b7c-b5f5-6cad02f05493\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:33:17 crc kubenswrapper[4753]: E0129 12:33:17.010136 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found Jan 29 12:33:17 crc kubenswrapper[4753]: E0129 12:33:17.010160 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-storage-0: configmap "swift-ring-files" not found Jan 29 12:33:17 crc kubenswrapper[4753]: E0129 12:33:17.010280 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/440fbaaa-a825-4b7c-b5f5-6cad02f05493-etc-swift podName:440fbaaa-a825-4b7c-b5f5-6cad02f05493 nodeName:}" failed. No retries permitted until 2026-01-29 12:33:17.510206667 +0000 UTC m=+1611.762288122 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/440fbaaa-a825-4b7c-b5f5-6cad02f05493-etc-swift") pod "swift-storage-0" (UID: "440fbaaa-a825-4b7c-b5f5-6cad02f05493") : configmap "swift-ring-files" not found Jan 29 12:33:17 crc kubenswrapper[4753]: I0129 12:33:17.010953 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/440fbaaa-a825-4b7c-b5f5-6cad02f05493-lock\") pod \"swift-storage-0\" (UID: \"440fbaaa-a825-4b7c-b5f5-6cad02f05493\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:33:17 crc kubenswrapper[4753]: I0129 12:33:17.011495 4753 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"swift-storage-0\" (UID: \"440fbaaa-a825-4b7c-b5f5-6cad02f05493\") device mount path \"/mnt/openstack/pv07\"" pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:33:17 crc kubenswrapper[4753]: I0129 12:33:17.032172 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/440fbaaa-a825-4b7c-b5f5-6cad02f05493-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"440fbaaa-a825-4b7c-b5f5-6cad02f05493\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:33:17 crc kubenswrapper[4753]: I0129 12:33:17.035045 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2wrvw\" (UniqueName: \"kubernetes.io/projected/440fbaaa-a825-4b7c-b5f5-6cad02f05493-kube-api-access-2wrvw\") pod \"swift-storage-0\" (UID: \"440fbaaa-a825-4b7c-b5f5-6cad02f05493\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:33:17 crc kubenswrapper[4753]: I0129 12:33:17.044116 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"swift-storage-0\" (UID: \"440fbaaa-a825-4b7c-b5f5-6cad02f05493\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:33:17 crc kubenswrapper[4753]: I0129 12:33:17.065535 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-jvrtt"] Jan 29 12:33:17 crc kubenswrapper[4753]: I0129 12:33:17.066438 4753 util.go:30] "No sandbox for pod can be found. 
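
The etc-swift mount failures here (and again further down, with durationBeforeRetry doubled to 1s) are a benign ordering race: the projected volume wants the "swift-ring-files" ConfigMap, which does not exist yet while a fresh swift-ring-rebalance job is only now being created, so the kubelet retries the mount with exponential backoff until the ConfigMap appears. A minimal sketch of that retry loop (the third-attempt success is faked; the real kubelet also caps the delay):

    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    // mountEtcSwift pretends the ConfigMap only becomes available on the third
    // attempt, the way the rebalance job eventually publishes the ring files.
    func mountEtcSwift(attempt int) error {
        if attempt < 3 {
            return errors.New(`configmap "swift-ring-files" not found`)
        }
        return nil
    }

    func main() {
        delay := 500 * time.Millisecond // first durationBeforeRetry in the log
        for attempt := 1; ; attempt++ {
            err := mountEtcSwift(attempt)
            if err == nil {
                fmt.Println(`MountVolume.SetUp succeeded for volume "etc-swift"`)
                return
            }
            fmt.Printf("%v; no retries permitted for %v\n", err, delay)
            time.Sleep(delay)
            delay *= 2 // 500ms -> 1s -> 2s ...
        }
    }
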
Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-jvrtt" Jan 29 12:33:17 crc kubenswrapper[4753]: I0129 12:33:17.068308 4753 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"swift-proxy-config-data" Jan 29 12:33:17 crc kubenswrapper[4753]: I0129 12:33:17.068564 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"swift-kuttl-tests"/"swift-ring-scripts" Jan 29 12:33:17 crc kubenswrapper[4753]: I0129 12:33:17.070790 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"swift-kuttl-tests"/"swift-ring-config-data" Jan 29 12:33:17 crc kubenswrapper[4753]: I0129 12:33:17.082028 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-jvrtt"] Jan 29 12:33:17 crc kubenswrapper[4753]: I0129 12:33:17.213286 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7f7e23b6-3379-4af2-a33b-2e1b12288561-combined-ca-bundle\") pod \"swift-ring-rebalance-jvrtt\" (UID: \"7f7e23b6-3379-4af2-a33b-2e1b12288561\") " pod="swift-kuttl-tests/swift-ring-rebalance-jvrtt" Jan 29 12:33:17 crc kubenswrapper[4753]: I0129 12:33:17.213358 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/7f7e23b6-3379-4af2-a33b-2e1b12288561-ring-data-devices\") pod \"swift-ring-rebalance-jvrtt\" (UID: \"7f7e23b6-3379-4af2-a33b-2e1b12288561\") " pod="swift-kuttl-tests/swift-ring-rebalance-jvrtt" Jan 29 12:33:17 crc kubenswrapper[4753]: I0129 12:33:17.213380 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/7f7e23b6-3379-4af2-a33b-2e1b12288561-swiftconf\") pod \"swift-ring-rebalance-jvrtt\" (UID: \"7f7e23b6-3379-4af2-a33b-2e1b12288561\") " pod="swift-kuttl-tests/swift-ring-rebalance-jvrtt" Jan 29 12:33:17 crc kubenswrapper[4753]: I0129 12:33:17.213404 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/7f7e23b6-3379-4af2-a33b-2e1b12288561-dispersionconf\") pod \"swift-ring-rebalance-jvrtt\" (UID: \"7f7e23b6-3379-4af2-a33b-2e1b12288561\") " pod="swift-kuttl-tests/swift-ring-rebalance-jvrtt" Jan 29 12:33:17 crc kubenswrapper[4753]: I0129 12:33:17.213429 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6gp48\" (UniqueName: \"kubernetes.io/projected/7f7e23b6-3379-4af2-a33b-2e1b12288561-kube-api-access-6gp48\") pod \"swift-ring-rebalance-jvrtt\" (UID: \"7f7e23b6-3379-4af2-a33b-2e1b12288561\") " pod="swift-kuttl-tests/swift-ring-rebalance-jvrtt" Jan 29 12:33:17 crc kubenswrapper[4753]: I0129 12:33:17.213616 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/7f7e23b6-3379-4af2-a33b-2e1b12288561-etc-swift\") pod \"swift-ring-rebalance-jvrtt\" (UID: \"7f7e23b6-3379-4af2-a33b-2e1b12288561\") " pod="swift-kuttl-tests/swift-ring-rebalance-jvrtt" Jan 29 12:33:17 crc kubenswrapper[4753]: I0129 12:33:17.213679 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7f7e23b6-3379-4af2-a33b-2e1b12288561-scripts\") pod \"swift-ring-rebalance-jvrtt\" (UID: 
\"7f7e23b6-3379-4af2-a33b-2e1b12288561\") " pod="swift-kuttl-tests/swift-ring-rebalance-jvrtt" Jan 29 12:33:17 crc kubenswrapper[4753]: I0129 12:33:17.315790 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7f7e23b6-3379-4af2-a33b-2e1b12288561-combined-ca-bundle\") pod \"swift-ring-rebalance-jvrtt\" (UID: \"7f7e23b6-3379-4af2-a33b-2e1b12288561\") " pod="swift-kuttl-tests/swift-ring-rebalance-jvrtt" Jan 29 12:33:17 crc kubenswrapper[4753]: I0129 12:33:17.315869 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/7f7e23b6-3379-4af2-a33b-2e1b12288561-ring-data-devices\") pod \"swift-ring-rebalance-jvrtt\" (UID: \"7f7e23b6-3379-4af2-a33b-2e1b12288561\") " pod="swift-kuttl-tests/swift-ring-rebalance-jvrtt" Jan 29 12:33:17 crc kubenswrapper[4753]: I0129 12:33:17.315898 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/7f7e23b6-3379-4af2-a33b-2e1b12288561-swiftconf\") pod \"swift-ring-rebalance-jvrtt\" (UID: \"7f7e23b6-3379-4af2-a33b-2e1b12288561\") " pod="swift-kuttl-tests/swift-ring-rebalance-jvrtt" Jan 29 12:33:17 crc kubenswrapper[4753]: I0129 12:33:17.315929 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6gp48\" (UniqueName: \"kubernetes.io/projected/7f7e23b6-3379-4af2-a33b-2e1b12288561-kube-api-access-6gp48\") pod \"swift-ring-rebalance-jvrtt\" (UID: \"7f7e23b6-3379-4af2-a33b-2e1b12288561\") " pod="swift-kuttl-tests/swift-ring-rebalance-jvrtt" Jan 29 12:33:17 crc kubenswrapper[4753]: I0129 12:33:17.315965 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/7f7e23b6-3379-4af2-a33b-2e1b12288561-dispersionconf\") pod \"swift-ring-rebalance-jvrtt\" (UID: \"7f7e23b6-3379-4af2-a33b-2e1b12288561\") " pod="swift-kuttl-tests/swift-ring-rebalance-jvrtt" Jan 29 12:33:17 crc kubenswrapper[4753]: I0129 12:33:17.315994 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/7f7e23b6-3379-4af2-a33b-2e1b12288561-etc-swift\") pod \"swift-ring-rebalance-jvrtt\" (UID: \"7f7e23b6-3379-4af2-a33b-2e1b12288561\") " pod="swift-kuttl-tests/swift-ring-rebalance-jvrtt" Jan 29 12:33:17 crc kubenswrapper[4753]: I0129 12:33:17.316038 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7f7e23b6-3379-4af2-a33b-2e1b12288561-scripts\") pod \"swift-ring-rebalance-jvrtt\" (UID: \"7f7e23b6-3379-4af2-a33b-2e1b12288561\") " pod="swift-kuttl-tests/swift-ring-rebalance-jvrtt" Jan 29 12:33:17 crc kubenswrapper[4753]: I0129 12:33:17.316942 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7f7e23b6-3379-4af2-a33b-2e1b12288561-scripts\") pod \"swift-ring-rebalance-jvrtt\" (UID: \"7f7e23b6-3379-4af2-a33b-2e1b12288561\") " pod="swift-kuttl-tests/swift-ring-rebalance-jvrtt" Jan 29 12:33:17 crc kubenswrapper[4753]: I0129 12:33:17.317753 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/7f7e23b6-3379-4af2-a33b-2e1b12288561-etc-swift\") pod \"swift-ring-rebalance-jvrtt\" (UID: \"7f7e23b6-3379-4af2-a33b-2e1b12288561\") " 
pod="swift-kuttl-tests/swift-ring-rebalance-jvrtt" Jan 29 12:33:17 crc kubenswrapper[4753]: I0129 12:33:17.318195 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/7f7e23b6-3379-4af2-a33b-2e1b12288561-ring-data-devices\") pod \"swift-ring-rebalance-jvrtt\" (UID: \"7f7e23b6-3379-4af2-a33b-2e1b12288561\") " pod="swift-kuttl-tests/swift-ring-rebalance-jvrtt" Jan 29 12:33:17 crc kubenswrapper[4753]: I0129 12:33:17.320476 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7f7e23b6-3379-4af2-a33b-2e1b12288561-combined-ca-bundle\") pod \"swift-ring-rebalance-jvrtt\" (UID: \"7f7e23b6-3379-4af2-a33b-2e1b12288561\") " pod="swift-kuttl-tests/swift-ring-rebalance-jvrtt" Jan 29 12:33:17 crc kubenswrapper[4753]: I0129 12:33:17.323787 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/7f7e23b6-3379-4af2-a33b-2e1b12288561-swiftconf\") pod \"swift-ring-rebalance-jvrtt\" (UID: \"7f7e23b6-3379-4af2-a33b-2e1b12288561\") " pod="swift-kuttl-tests/swift-ring-rebalance-jvrtt" Jan 29 12:33:17 crc kubenswrapper[4753]: I0129 12:33:17.332895 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/7f7e23b6-3379-4af2-a33b-2e1b12288561-dispersionconf\") pod \"swift-ring-rebalance-jvrtt\" (UID: \"7f7e23b6-3379-4af2-a33b-2e1b12288561\") " pod="swift-kuttl-tests/swift-ring-rebalance-jvrtt" Jan 29 12:33:17 crc kubenswrapper[4753]: I0129 12:33:17.336055 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6gp48\" (UniqueName: \"kubernetes.io/projected/7f7e23b6-3379-4af2-a33b-2e1b12288561-kube-api-access-6gp48\") pod \"swift-ring-rebalance-jvrtt\" (UID: \"7f7e23b6-3379-4af2-a33b-2e1b12288561\") " pod="swift-kuttl-tests/swift-ring-rebalance-jvrtt" Jan 29 12:33:17 crc kubenswrapper[4753]: I0129 12:33:17.392155 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-jvrtt" Jan 29 12:33:17 crc kubenswrapper[4753]: I0129 12:33:17.707242 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/440fbaaa-a825-4b7c-b5f5-6cad02f05493-etc-swift\") pod \"swift-storage-0\" (UID: \"440fbaaa-a825-4b7c-b5f5-6cad02f05493\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:33:17 crc kubenswrapper[4753]: E0129 12:33:17.707596 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found Jan 29 12:33:17 crc kubenswrapper[4753]: E0129 12:33:17.707615 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-storage-0: configmap "swift-ring-files" not found Jan 29 12:33:17 crc kubenswrapper[4753]: E0129 12:33:17.707678 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/440fbaaa-a825-4b7c-b5f5-6cad02f05493-etc-swift podName:440fbaaa-a825-4b7c-b5f5-6cad02f05493 nodeName:}" failed. No retries permitted until 2026-01-29 12:33:18.70765822 +0000 UTC m=+1612.959739675 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/440fbaaa-a825-4b7c-b5f5-6cad02f05493-etc-swift") pod "swift-storage-0" (UID: "440fbaaa-a825-4b7c-b5f5-6cad02f05493") : configmap "swift-ring-files" not found Jan 29 12:33:18 crc kubenswrapper[4753]: I0129 12:33:18.066407 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/swift-proxy-6dc98fc8fb-98r45"] Jan 29 12:33:18 crc kubenswrapper[4753]: I0129 12:33:18.069321 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-proxy-6dc98fc8fb-98r45" Jan 29 12:33:18 crc kubenswrapper[4753]: I0129 12:33:18.074477 4753 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"cert-swift-public-svc" Jan 29 12:33:18 crc kubenswrapper[4753]: I0129 12:33:18.074913 4753 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"cert-swift-internal-svc" Jan 29 12:33:18 crc kubenswrapper[4753]: I0129 12:33:18.097518 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-proxy-6dc98fc8fb-98r45"] Jan 29 12:33:18 crc kubenswrapper[4753]: I0129 12:33:18.184505 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/701dafed-ce7f-4a89-982d-f243757d097f-combined-ca-bundle\") pod \"swift-proxy-6dc98fc8fb-98r45\" (UID: \"701dafed-ce7f-4a89-982d-f243757d097f\") " pod="swift-kuttl-tests/swift-proxy-6dc98fc8fb-98r45" Jan 29 12:33:18 crc kubenswrapper[4753]: I0129 12:33:18.184563 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/701dafed-ce7f-4a89-982d-f243757d097f-config-data\") pod \"swift-proxy-6dc98fc8fb-98r45\" (UID: \"701dafed-ce7f-4a89-982d-f243757d097f\") " pod="swift-kuttl-tests/swift-proxy-6dc98fc8fb-98r45" Jan 29 12:33:18 crc kubenswrapper[4753]: I0129 12:33:18.184646 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/701dafed-ce7f-4a89-982d-f243757d097f-etc-swift\") pod \"swift-proxy-6dc98fc8fb-98r45\" (UID: \"701dafed-ce7f-4a89-982d-f243757d097f\") " pod="swift-kuttl-tests/swift-proxy-6dc98fc8fb-98r45" Jan 29 12:33:18 crc kubenswrapper[4753]: I0129 12:33:18.184723 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/701dafed-ce7f-4a89-982d-f243757d097f-run-httpd\") pod \"swift-proxy-6dc98fc8fb-98r45\" (UID: \"701dafed-ce7f-4a89-982d-f243757d097f\") " pod="swift-kuttl-tests/swift-proxy-6dc98fc8fb-98r45" Jan 29 12:33:18 crc kubenswrapper[4753]: I0129 12:33:18.184758 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/701dafed-ce7f-4a89-982d-f243757d097f-public-tls-certs\") pod \"swift-proxy-6dc98fc8fb-98r45\" (UID: \"701dafed-ce7f-4a89-982d-f243757d097f\") " pod="swift-kuttl-tests/swift-proxy-6dc98fc8fb-98r45" Jan 29 12:33:18 crc kubenswrapper[4753]: I0129 12:33:18.184789 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/701dafed-ce7f-4a89-982d-f243757d097f-internal-tls-certs\") pod \"swift-proxy-6dc98fc8fb-98r45\" (UID: 
\"701dafed-ce7f-4a89-982d-f243757d097f\") " pod="swift-kuttl-tests/swift-proxy-6dc98fc8fb-98r45" Jan 29 12:33:18 crc kubenswrapper[4753]: I0129 12:33:18.184846 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z9sdn\" (UniqueName: \"kubernetes.io/projected/701dafed-ce7f-4a89-982d-f243757d097f-kube-api-access-z9sdn\") pod \"swift-proxy-6dc98fc8fb-98r45\" (UID: \"701dafed-ce7f-4a89-982d-f243757d097f\") " pod="swift-kuttl-tests/swift-proxy-6dc98fc8fb-98r45" Jan 29 12:33:18 crc kubenswrapper[4753]: I0129 12:33:18.184918 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/701dafed-ce7f-4a89-982d-f243757d097f-log-httpd\") pod \"swift-proxy-6dc98fc8fb-98r45\" (UID: \"701dafed-ce7f-4a89-982d-f243757d097f\") " pod="swift-kuttl-tests/swift-proxy-6dc98fc8fb-98r45" Jan 29 12:33:18 crc kubenswrapper[4753]: I0129 12:33:18.286271 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/701dafed-ce7f-4a89-982d-f243757d097f-internal-tls-certs\") pod \"swift-proxy-6dc98fc8fb-98r45\" (UID: \"701dafed-ce7f-4a89-982d-f243757d097f\") " pod="swift-kuttl-tests/swift-proxy-6dc98fc8fb-98r45" Jan 29 12:33:18 crc kubenswrapper[4753]: I0129 12:33:18.286586 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z9sdn\" (UniqueName: \"kubernetes.io/projected/701dafed-ce7f-4a89-982d-f243757d097f-kube-api-access-z9sdn\") pod \"swift-proxy-6dc98fc8fb-98r45\" (UID: \"701dafed-ce7f-4a89-982d-f243757d097f\") " pod="swift-kuttl-tests/swift-proxy-6dc98fc8fb-98r45" Jan 29 12:33:18 crc kubenswrapper[4753]: I0129 12:33:18.286707 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/701dafed-ce7f-4a89-982d-f243757d097f-log-httpd\") pod \"swift-proxy-6dc98fc8fb-98r45\" (UID: \"701dafed-ce7f-4a89-982d-f243757d097f\") " pod="swift-kuttl-tests/swift-proxy-6dc98fc8fb-98r45" Jan 29 12:33:18 crc kubenswrapper[4753]: I0129 12:33:18.286865 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/701dafed-ce7f-4a89-982d-f243757d097f-combined-ca-bundle\") pod \"swift-proxy-6dc98fc8fb-98r45\" (UID: \"701dafed-ce7f-4a89-982d-f243757d097f\") " pod="swift-kuttl-tests/swift-proxy-6dc98fc8fb-98r45" Jan 29 12:33:18 crc kubenswrapper[4753]: I0129 12:33:18.286984 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/701dafed-ce7f-4a89-982d-f243757d097f-config-data\") pod \"swift-proxy-6dc98fc8fb-98r45\" (UID: \"701dafed-ce7f-4a89-982d-f243757d097f\") " pod="swift-kuttl-tests/swift-proxy-6dc98fc8fb-98r45" Jan 29 12:33:18 crc kubenswrapper[4753]: I0129 12:33:18.287097 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/701dafed-ce7f-4a89-982d-f243757d097f-etc-swift\") pod \"swift-proxy-6dc98fc8fb-98r45\" (UID: \"701dafed-ce7f-4a89-982d-f243757d097f\") " pod="swift-kuttl-tests/swift-proxy-6dc98fc8fb-98r45" Jan 29 12:33:18 crc kubenswrapper[4753]: I0129 12:33:18.287212 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/701dafed-ce7f-4a89-982d-f243757d097f-run-httpd\") pod \"swift-proxy-6dc98fc8fb-98r45\" (UID: \"701dafed-ce7f-4a89-982d-f243757d097f\") " pod="swift-kuttl-tests/swift-proxy-6dc98fc8fb-98r45" Jan 29 12:33:18 crc kubenswrapper[4753]: I0129 12:33:18.287346 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/701dafed-ce7f-4a89-982d-f243757d097f-public-tls-certs\") pod \"swift-proxy-6dc98fc8fb-98r45\" (UID: \"701dafed-ce7f-4a89-982d-f243757d097f\") " pod="swift-kuttl-tests/swift-proxy-6dc98fc8fb-98r45" Jan 29 12:33:18 crc kubenswrapper[4753]: E0129 12:33:18.288947 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found Jan 29 12:33:18 crc kubenswrapper[4753]: E0129 12:33:18.289090 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-proxy-6dc98fc8fb-98r45: configmap "swift-ring-files" not found Jan 29 12:33:18 crc kubenswrapper[4753]: E0129 12:33:18.289261 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/701dafed-ce7f-4a89-982d-f243757d097f-etc-swift podName:701dafed-ce7f-4a89-982d-f243757d097f nodeName:}" failed. No retries permitted until 2026-01-29 12:33:18.789236418 +0000 UTC m=+1613.041317883 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/701dafed-ce7f-4a89-982d-f243757d097f-etc-swift") pod "swift-proxy-6dc98fc8fb-98r45" (UID: "701dafed-ce7f-4a89-982d-f243757d097f") : configmap "swift-ring-files" not found Jan 29 12:33:18 crc kubenswrapper[4753]: I0129 12:33:18.289335 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/701dafed-ce7f-4a89-982d-f243757d097f-log-httpd\") pod \"swift-proxy-6dc98fc8fb-98r45\" (UID: \"701dafed-ce7f-4a89-982d-f243757d097f\") " pod="swift-kuttl-tests/swift-proxy-6dc98fc8fb-98r45" Jan 29 12:33:18 crc kubenswrapper[4753]: I0129 12:33:18.289026 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/701dafed-ce7f-4a89-982d-f243757d097f-run-httpd\") pod \"swift-proxy-6dc98fc8fb-98r45\" (UID: \"701dafed-ce7f-4a89-982d-f243757d097f\") " pod="swift-kuttl-tests/swift-proxy-6dc98fc8fb-98r45" Jan 29 12:33:18 crc kubenswrapper[4753]: I0129 12:33:18.292135 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/701dafed-ce7f-4a89-982d-f243757d097f-combined-ca-bundle\") pod \"swift-proxy-6dc98fc8fb-98r45\" (UID: \"701dafed-ce7f-4a89-982d-f243757d097f\") " pod="swift-kuttl-tests/swift-proxy-6dc98fc8fb-98r45" Jan 29 12:33:18 crc kubenswrapper[4753]: I0129 12:33:18.292620 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/701dafed-ce7f-4a89-982d-f243757d097f-public-tls-certs\") pod \"swift-proxy-6dc98fc8fb-98r45\" (UID: \"701dafed-ce7f-4a89-982d-f243757d097f\") " pod="swift-kuttl-tests/swift-proxy-6dc98fc8fb-98r45" Jan 29 12:33:18 crc kubenswrapper[4753]: I0129 12:33:18.293603 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/701dafed-ce7f-4a89-982d-f243757d097f-config-data\") pod \"swift-proxy-6dc98fc8fb-98r45\" (UID: \"701dafed-ce7f-4a89-982d-f243757d097f\") " 
pod="swift-kuttl-tests/swift-proxy-6dc98fc8fb-98r45" Jan 29 12:33:18 crc kubenswrapper[4753]: I0129 12:33:18.294035 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/701dafed-ce7f-4a89-982d-f243757d097f-internal-tls-certs\") pod \"swift-proxy-6dc98fc8fb-98r45\" (UID: \"701dafed-ce7f-4a89-982d-f243757d097f\") " pod="swift-kuttl-tests/swift-proxy-6dc98fc8fb-98r45" Jan 29 12:33:18 crc kubenswrapper[4753]: I0129 12:33:18.315898 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z9sdn\" (UniqueName: \"kubernetes.io/projected/701dafed-ce7f-4a89-982d-f243757d097f-kube-api-access-z9sdn\") pod \"swift-proxy-6dc98fc8fb-98r45\" (UID: \"701dafed-ce7f-4a89-982d-f243757d097f\") " pod="swift-kuttl-tests/swift-proxy-6dc98fc8fb-98r45" Jan 29 12:33:18 crc kubenswrapper[4753]: I0129 12:33:18.372841 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-jvrtt"] Jan 29 12:33:18 crc kubenswrapper[4753]: I0129 12:33:18.806428 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/701dafed-ce7f-4a89-982d-f243757d097f-etc-swift\") pod \"swift-proxy-6dc98fc8fb-98r45\" (UID: \"701dafed-ce7f-4a89-982d-f243757d097f\") " pod="swift-kuttl-tests/swift-proxy-6dc98fc8fb-98r45" Jan 29 12:33:18 crc kubenswrapper[4753]: I0129 12:33:18.806735 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/440fbaaa-a825-4b7c-b5f5-6cad02f05493-etc-swift\") pod \"swift-storage-0\" (UID: \"440fbaaa-a825-4b7c-b5f5-6cad02f05493\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:33:18 crc kubenswrapper[4753]: E0129 12:33:18.806819 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found Jan 29 12:33:18 crc kubenswrapper[4753]: E0129 12:33:18.806864 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-proxy-6dc98fc8fb-98r45: configmap "swift-ring-files" not found Jan 29 12:33:18 crc kubenswrapper[4753]: E0129 12:33:18.806977 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/701dafed-ce7f-4a89-982d-f243757d097f-etc-swift podName:701dafed-ce7f-4a89-982d-f243757d097f nodeName:}" failed. No retries permitted until 2026-01-29 12:33:19.806944041 +0000 UTC m=+1614.059025536 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/701dafed-ce7f-4a89-982d-f243757d097f-etc-swift") pod "swift-proxy-6dc98fc8fb-98r45" (UID: "701dafed-ce7f-4a89-982d-f243757d097f") : configmap "swift-ring-files" not found Jan 29 12:33:18 crc kubenswrapper[4753]: E0129 12:33:18.807161 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found Jan 29 12:33:18 crc kubenswrapper[4753]: E0129 12:33:18.807278 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-storage-0: configmap "swift-ring-files" not found Jan 29 12:33:18 crc kubenswrapper[4753]: E0129 12:33:18.807463 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/440fbaaa-a825-4b7c-b5f5-6cad02f05493-etc-swift podName:440fbaaa-a825-4b7c-b5f5-6cad02f05493 nodeName:}" failed. 
No retries permitted until 2026-01-29 12:33:20.807420824 +0000 UTC m=+1615.059502319 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/440fbaaa-a825-4b7c-b5f5-6cad02f05493-etc-swift") pod "swift-storage-0" (UID: "440fbaaa-a825-4b7c-b5f5-6cad02f05493") : configmap "swift-ring-files" not found
Jan 29 12:33:19 crc kubenswrapper[4753]: I0129 12:33:19.290546 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-ring-rebalance-jvrtt" event={"ID":"7f7e23b6-3379-4af2-a33b-2e1b12288561","Type":"ContainerStarted","Data":"a1d1e74baad32171276f9714c7ea2822f176dbdbeef1fcf774facda25ff25814"}
Jan 29 12:33:19 crc kubenswrapper[4753]: I0129 12:33:19.835041 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/701dafed-ce7f-4a89-982d-f243757d097f-etc-swift\") pod \"swift-proxy-6dc98fc8fb-98r45\" (UID: \"701dafed-ce7f-4a89-982d-f243757d097f\") " pod="swift-kuttl-tests/swift-proxy-6dc98fc8fb-98r45"
Jan 29 12:33:19 crc kubenswrapper[4753]: E0129 12:33:19.835273 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found
Jan 29 12:33:19 crc kubenswrapper[4753]: E0129 12:33:19.835547 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-proxy-6dc98fc8fb-98r45: configmap "swift-ring-files" not found
Jan 29 12:33:19 crc kubenswrapper[4753]: E0129 12:33:19.835653 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/701dafed-ce7f-4a89-982d-f243757d097f-etc-swift podName:701dafed-ce7f-4a89-982d-f243757d097f nodeName:}" failed. No retries permitted until 2026-01-29 12:33:21.835633662 +0000 UTC m=+1616.087715117 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/701dafed-ce7f-4a89-982d-f243757d097f-etc-swift") pod "swift-proxy-6dc98fc8fb-98r45" (UID: "701dafed-ce7f-4a89-982d-f243757d097f") : configmap "swift-ring-files" not found
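
The durationBeforeRetry values in the surrounding entries double after every failed attempt: 500ms and 1s earlier, 2s here, then 4s and 8s further down. A minimal Go sketch of that doubling-with-cap retry pattern; the cap value is an illustrative assumption, not something read from this log:

```go
package main

import (
	"fmt"
	"time"
)

// nextRetryDelay mirrors the doubling visible in the durationBeforeRetry
// values around this point in the log (500ms, 1s, 2s, then 4s and 8s below).
// The maxDelay cap is an assumed illustrative value.
func nextRetryDelay(current time.Duration) time.Duration {
	const (
		initial  = 500 * time.Millisecond
		maxDelay = 2*time.Minute + 2*time.Second
	)
	if current < initial {
		return initial
	}
	if next := 2 * current; next < maxDelay {
		return next
	}
	return maxDelay
}

func main() {
	var d time.Duration
	for i := 0; i < 5; i++ {
		d = nextRetryDelay(d)
		fmt.Println(d) // 500ms 1s 2s 4s 8s
	}
}
```
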
Jan 29 12:33:20 crc kubenswrapper[4753]: I0129 12:33:20.302094 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-ring-rebalance-jvrtt" event={"ID":"7f7e23b6-3379-4af2-a33b-2e1b12288561","Type":"ContainerStarted","Data":"46f7c35ab86b89f8376627ab298147cb497e7949a367a37e03bef56228da0463"}
Jan 29 12:33:20 crc kubenswrapper[4753]: I0129 12:33:20.330103 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="swift-kuttl-tests/swift-ring-rebalance-jvrtt" podStartSLOduration=3.3300555259999998 podStartE2EDuration="3.330055526s" podCreationTimestamp="2026-01-29 12:33:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:33:20.325271329 +0000 UTC m=+1614.577352794" watchObservedRunningTime="2026-01-29 12:33:20.330055526 +0000 UTC m=+1614.582136981"
Jan 29 12:33:20 crc kubenswrapper[4753]: I0129 12:33:20.877197 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/440fbaaa-a825-4b7c-b5f5-6cad02f05493-etc-swift\") pod \"swift-storage-0\" (UID: \"440fbaaa-a825-4b7c-b5f5-6cad02f05493\") " pod="swift-kuttl-tests/swift-storage-0"
Jan 29 12:33:20 crc kubenswrapper[4753]: E0129 12:33:20.877380 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found
Jan 29 12:33:20 crc kubenswrapper[4753]: E0129 12:33:20.877688 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-storage-0: configmap "swift-ring-files" not found
Jan 29 12:33:20 crc kubenswrapper[4753]: E0129 12:33:20.877749 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/440fbaaa-a825-4b7c-b5f5-6cad02f05493-etc-swift podName:440fbaaa-a825-4b7c-b5f5-6cad02f05493 nodeName:}" failed. No retries permitted until 2026-01-29 12:33:24.877729686 +0000 UTC m=+1619.129811151 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/440fbaaa-a825-4b7c-b5f5-6cad02f05493-etc-swift") pod "swift-storage-0" (UID: "440fbaaa-a825-4b7c-b5f5-6cad02f05493") : configmap "swift-ring-files" not found Jan 29 12:33:21 crc kubenswrapper[4753]: I0129 12:33:21.931270 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/701dafed-ce7f-4a89-982d-f243757d097f-etc-swift\") pod \"swift-proxy-6dc98fc8fb-98r45\" (UID: \"701dafed-ce7f-4a89-982d-f243757d097f\") " pod="swift-kuttl-tests/swift-proxy-6dc98fc8fb-98r45" Jan 29 12:33:21 crc kubenswrapper[4753]: E0129 12:33:21.931512 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found Jan 29 12:33:21 crc kubenswrapper[4753]: E0129 12:33:21.931874 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-proxy-6dc98fc8fb-98r45: configmap "swift-ring-files" not found Jan 29 12:33:21 crc kubenswrapper[4753]: E0129 12:33:21.931964 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/701dafed-ce7f-4a89-982d-f243757d097f-etc-swift podName:701dafed-ce7f-4a89-982d-f243757d097f nodeName:}" failed. No retries permitted until 2026-01-29 12:33:25.931932102 +0000 UTC m=+1620.184013557 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/701dafed-ce7f-4a89-982d-f243757d097f-etc-swift") pod "swift-proxy-6dc98fc8fb-98r45" (UID: "701dafed-ce7f-4a89-982d-f243757d097f") : configmap "swift-ring-files" not found Jan 29 12:33:24 crc kubenswrapper[4753]: I0129 12:33:24.653737 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-9qkh7"] Jan 29 12:33:24 crc kubenswrapper[4753]: I0129 12:33:24.655168 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-9qkh7" Jan 29 12:33:24 crc kubenswrapper[4753]: I0129 12:33:24.681786 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-9qkh7"] Jan 29 12:33:24 crc kubenswrapper[4753]: I0129 12:33:24.798436 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aadb20d7-4116-47ca-9c1e-41d54eb80a6a-utilities\") pod \"certified-operators-9qkh7\" (UID: \"aadb20d7-4116-47ca-9c1e-41d54eb80a6a\") " pod="openshift-marketplace/certified-operators-9qkh7" Jan 29 12:33:24 crc kubenswrapper[4753]: I0129 12:33:24.799013 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-78qhd\" (UniqueName: \"kubernetes.io/projected/aadb20d7-4116-47ca-9c1e-41d54eb80a6a-kube-api-access-78qhd\") pod \"certified-operators-9qkh7\" (UID: \"aadb20d7-4116-47ca-9c1e-41d54eb80a6a\") " pod="openshift-marketplace/certified-operators-9qkh7" Jan 29 12:33:24 crc kubenswrapper[4753]: I0129 12:33:24.799087 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aadb20d7-4116-47ca-9c1e-41d54eb80a6a-catalog-content\") pod \"certified-operators-9qkh7\" (UID: \"aadb20d7-4116-47ca-9c1e-41d54eb80a6a\") " pod="openshift-marketplace/certified-operators-9qkh7" Jan 29 12:33:24 crc kubenswrapper[4753]: I0129 12:33:24.900698 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-78qhd\" (UniqueName: \"kubernetes.io/projected/aadb20d7-4116-47ca-9c1e-41d54eb80a6a-kube-api-access-78qhd\") pod \"certified-operators-9qkh7\" (UID: \"aadb20d7-4116-47ca-9c1e-41d54eb80a6a\") " pod="openshift-marketplace/certified-operators-9qkh7" Jan 29 12:33:24 crc kubenswrapper[4753]: I0129 12:33:24.901184 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aadb20d7-4116-47ca-9c1e-41d54eb80a6a-catalog-content\") pod \"certified-operators-9qkh7\" (UID: \"aadb20d7-4116-47ca-9c1e-41d54eb80a6a\") " pod="openshift-marketplace/certified-operators-9qkh7" Jan 29 12:33:24 crc kubenswrapper[4753]: I0129 12:33:24.901446 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aadb20d7-4116-47ca-9c1e-41d54eb80a6a-utilities\") pod \"certified-operators-9qkh7\" (UID: \"aadb20d7-4116-47ca-9c1e-41d54eb80a6a\") " pod="openshift-marketplace/certified-operators-9qkh7" Jan 29 12:33:24 crc kubenswrapper[4753]: I0129 12:33:24.901497 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/440fbaaa-a825-4b7c-b5f5-6cad02f05493-etc-swift\") pod \"swift-storage-0\" (UID: \"440fbaaa-a825-4b7c-b5f5-6cad02f05493\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:33:24 crc kubenswrapper[4753]: E0129 12:33:24.901649 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found Jan 29 12:33:24 crc kubenswrapper[4753]: E0129 12:33:24.901673 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-storage-0: configmap "swift-ring-files" not found Jan 29 12:33:24 crc kubenswrapper[4753]: E0129 12:33:24.901716 4753 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/440fbaaa-a825-4b7c-b5f5-6cad02f05493-etc-swift podName:440fbaaa-a825-4b7c-b5f5-6cad02f05493 nodeName:}" failed. No retries permitted until 2026-01-29 12:33:32.901700999 +0000 UTC m=+1627.153782454 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/440fbaaa-a825-4b7c-b5f5-6cad02f05493-etc-swift") pod "swift-storage-0" (UID: "440fbaaa-a825-4b7c-b5f5-6cad02f05493") : configmap "swift-ring-files" not found Jan 29 12:33:24 crc kubenswrapper[4753]: I0129 12:33:24.902655 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aadb20d7-4116-47ca-9c1e-41d54eb80a6a-catalog-content\") pod \"certified-operators-9qkh7\" (UID: \"aadb20d7-4116-47ca-9c1e-41d54eb80a6a\") " pod="openshift-marketplace/certified-operators-9qkh7" Jan 29 12:33:24 crc kubenswrapper[4753]: I0129 12:33:24.902699 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aadb20d7-4116-47ca-9c1e-41d54eb80a6a-utilities\") pod \"certified-operators-9qkh7\" (UID: \"aadb20d7-4116-47ca-9c1e-41d54eb80a6a\") " pod="openshift-marketplace/certified-operators-9qkh7" Jan 29 12:33:24 crc kubenswrapper[4753]: I0129 12:33:24.923183 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-78qhd\" (UniqueName: \"kubernetes.io/projected/aadb20d7-4116-47ca-9c1e-41d54eb80a6a-kube-api-access-78qhd\") pod \"certified-operators-9qkh7\" (UID: \"aadb20d7-4116-47ca-9c1e-41d54eb80a6a\") " pod="openshift-marketplace/certified-operators-9qkh7" Jan 29 12:33:24 crc kubenswrapper[4753]: I0129 12:33:24.977657 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-9qkh7" Jan 29 12:33:25 crc kubenswrapper[4753]: I0129 12:33:25.551745 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-9qkh7"] Jan 29 12:33:25 crc kubenswrapper[4753]: I0129 12:33:25.888921 4753 scope.go:117] "RemoveContainer" containerID="00211d2562b6632acb04decda9045cbb8d3216adca69cd5bc1380ec42b8211e0" Jan 29 12:33:25 crc kubenswrapper[4753]: E0129 12:33:25.889345 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7c24x_openshift-machine-config-operator(b0310995-a7c7-47c3-ae6c-05daaaba92a6)\"" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" Jan 29 12:33:25 crc kubenswrapper[4753]: I0129 12:33:25.960268 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/701dafed-ce7f-4a89-982d-f243757d097f-etc-swift\") pod \"swift-proxy-6dc98fc8fb-98r45\" (UID: \"701dafed-ce7f-4a89-982d-f243757d097f\") " pod="swift-kuttl-tests/swift-proxy-6dc98fc8fb-98r45" Jan 29 12:33:25 crc kubenswrapper[4753]: E0129 12:33:25.960565 4753 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found Jan 29 12:33:25 crc kubenswrapper[4753]: E0129 12:33:25.960710 4753 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-proxy-6dc98fc8fb-98r45: configmap "swift-ring-files" not found Jan 29 12:33:25 crc kubenswrapper[4753]: E0129 12:33:25.960809 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/701dafed-ce7f-4a89-982d-f243757d097f-etc-swift podName:701dafed-ce7f-4a89-982d-f243757d097f nodeName:}" failed. No retries permitted until 2026-01-29 12:33:33.960779155 +0000 UTC m=+1628.212860620 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/701dafed-ce7f-4a89-982d-f243757d097f-etc-swift") pod "swift-proxy-6dc98fc8fb-98r45" (UID: "701dafed-ce7f-4a89-982d-f243757d097f") : configmap "swift-ring-files" not found Jan 29 12:33:26 crc kubenswrapper[4753]: I0129 12:33:26.364096 4753 generic.go:334] "Generic (PLEG): container finished" podID="aadb20d7-4116-47ca-9c1e-41d54eb80a6a" containerID="6980bac95a3044928f273e9646567a1f6eb86db4dcf6dfd1f524cf97ad0bc582" exitCode=0 Jan 29 12:33:26 crc kubenswrapper[4753]: I0129 12:33:26.364151 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9qkh7" event={"ID":"aadb20d7-4116-47ca-9c1e-41d54eb80a6a","Type":"ContainerDied","Data":"6980bac95a3044928f273e9646567a1f6eb86db4dcf6dfd1f524cf97ad0bc582"} Jan 29 12:33:26 crc kubenswrapper[4753]: I0129 12:33:26.364189 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9qkh7" event={"ID":"aadb20d7-4116-47ca-9c1e-41d54eb80a6a","Type":"ContainerStarted","Data":"5b0ebbf3a1a9c8a03bdac492b3b811fdfcfed4c2f43aeb9bdf8759ef2d4e2e9e"} Jan 29 12:33:30 crc kubenswrapper[4753]: I0129 12:33:30.397528 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9qkh7" event={"ID":"aadb20d7-4116-47ca-9c1e-41d54eb80a6a","Type":"ContainerStarted","Data":"a775f37947eddea4396e931b71b48ce427539ca47e8ef8512a625c2ff7fcf4ed"} Jan 29 12:33:31 crc kubenswrapper[4753]: I0129 12:33:31.057636 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/root-account-create-update-ts8d4"] Jan 29 12:33:31 crc kubenswrapper[4753]: I0129 12:33:31.065649 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/root-account-create-update-ts8d4"] Jan 29 12:33:31 crc kubenswrapper[4753]: I0129 12:33:31.410019 4753 generic.go:334] "Generic (PLEG): container finished" podID="aadb20d7-4116-47ca-9c1e-41d54eb80a6a" containerID="a775f37947eddea4396e931b71b48ce427539ca47e8ef8512a625c2ff7fcf4ed" exitCode=0 Jan 29 12:33:31 crc kubenswrapper[4753]: I0129 12:33:31.410089 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9qkh7" event={"ID":"aadb20d7-4116-47ca-9c1e-41d54eb80a6a","Type":"ContainerDied","Data":"a775f37947eddea4396e931b71b48ce427539ca47e8ef8512a625c2ff7fcf4ed"} Jan 29 12:33:31 crc kubenswrapper[4753]: I0129 12:33:31.414246 4753 generic.go:334] "Generic (PLEG): container finished" podID="7f7e23b6-3379-4af2-a33b-2e1b12288561" containerID="46f7c35ab86b89f8376627ab298147cb497e7949a367a37e03bef56228da0463" exitCode=0 Jan 29 12:33:31 crc kubenswrapper[4753]: I0129 12:33:31.414294 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-ring-rebalance-jvrtt" event={"ID":"7f7e23b6-3379-4af2-a33b-2e1b12288561","Type":"ContainerDied","Data":"46f7c35ab86b89f8376627ab298147cb497e7949a367a37e03bef56228da0463"} Jan 29 12:33:31 crc kubenswrapper[4753]: I0129 12:33:31.895938 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="62c6dedf-3f69-486b-9dac-2fcef82d1571" path="/var/lib/kubelet/pods/62c6dedf-3f69-486b-9dac-2fcef82d1571/volumes" Jan 29 12:33:32 crc kubenswrapper[4753]: I0129 12:33:32.799183 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-jvrtt" Jan 29 12:33:32 crc kubenswrapper[4753]: I0129 12:33:32.867594 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/7f7e23b6-3379-4af2-a33b-2e1b12288561-swiftconf\") pod \"7f7e23b6-3379-4af2-a33b-2e1b12288561\" (UID: \"7f7e23b6-3379-4af2-a33b-2e1b12288561\") " Jan 29 12:33:32 crc kubenswrapper[4753]: I0129 12:33:32.867649 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/7f7e23b6-3379-4af2-a33b-2e1b12288561-etc-swift\") pod \"7f7e23b6-3379-4af2-a33b-2e1b12288561\" (UID: \"7f7e23b6-3379-4af2-a33b-2e1b12288561\") " Jan 29 12:33:32 crc kubenswrapper[4753]: I0129 12:33:32.867728 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6gp48\" (UniqueName: \"kubernetes.io/projected/7f7e23b6-3379-4af2-a33b-2e1b12288561-kube-api-access-6gp48\") pod \"7f7e23b6-3379-4af2-a33b-2e1b12288561\" (UID: \"7f7e23b6-3379-4af2-a33b-2e1b12288561\") " Jan 29 12:33:32 crc kubenswrapper[4753]: I0129 12:33:32.867773 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/7f7e23b6-3379-4af2-a33b-2e1b12288561-ring-data-devices\") pod \"7f7e23b6-3379-4af2-a33b-2e1b12288561\" (UID: \"7f7e23b6-3379-4af2-a33b-2e1b12288561\") " Jan 29 12:33:32 crc kubenswrapper[4753]: I0129 12:33:32.867858 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/7f7e23b6-3379-4af2-a33b-2e1b12288561-dispersionconf\") pod \"7f7e23b6-3379-4af2-a33b-2e1b12288561\" (UID: \"7f7e23b6-3379-4af2-a33b-2e1b12288561\") " Jan 29 12:33:32 crc kubenswrapper[4753]: I0129 12:33:32.867914 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7f7e23b6-3379-4af2-a33b-2e1b12288561-combined-ca-bundle\") pod \"7f7e23b6-3379-4af2-a33b-2e1b12288561\" (UID: \"7f7e23b6-3379-4af2-a33b-2e1b12288561\") " Jan 29 12:33:32 crc kubenswrapper[4753]: I0129 12:33:32.867949 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7f7e23b6-3379-4af2-a33b-2e1b12288561-scripts\") pod \"7f7e23b6-3379-4af2-a33b-2e1b12288561\" (UID: \"7f7e23b6-3379-4af2-a33b-2e1b12288561\") " Jan 29 12:33:32 crc kubenswrapper[4753]: I0129 12:33:32.869130 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7f7e23b6-3379-4af2-a33b-2e1b12288561-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "7f7e23b6-3379-4af2-a33b-2e1b12288561" (UID: "7f7e23b6-3379-4af2-a33b-2e1b12288561"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:33:32 crc kubenswrapper[4753]: I0129 12:33:32.874533 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7f7e23b6-3379-4af2-a33b-2e1b12288561-kube-api-access-6gp48" (OuterVolumeSpecName: "kube-api-access-6gp48") pod "7f7e23b6-3379-4af2-a33b-2e1b12288561" (UID: "7f7e23b6-3379-4af2-a33b-2e1b12288561"). InnerVolumeSpecName "kube-api-access-6gp48". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:33:32 crc kubenswrapper[4753]: I0129 12:33:32.875516 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7f7e23b6-3379-4af2-a33b-2e1b12288561-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "7f7e23b6-3379-4af2-a33b-2e1b12288561" (UID: "7f7e23b6-3379-4af2-a33b-2e1b12288561"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:33:32 crc kubenswrapper[4753]: I0129 12:33:32.888206 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7f7e23b6-3379-4af2-a33b-2e1b12288561-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "7f7e23b6-3379-4af2-a33b-2e1b12288561" (UID: "7f7e23b6-3379-4af2-a33b-2e1b12288561"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:33:32 crc kubenswrapper[4753]: I0129 12:33:32.890259 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7f7e23b6-3379-4af2-a33b-2e1b12288561-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7f7e23b6-3379-4af2-a33b-2e1b12288561" (UID: "7f7e23b6-3379-4af2-a33b-2e1b12288561"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:33:32 crc kubenswrapper[4753]: I0129 12:33:32.898349 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7f7e23b6-3379-4af2-a33b-2e1b12288561-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "7f7e23b6-3379-4af2-a33b-2e1b12288561" (UID: "7f7e23b6-3379-4af2-a33b-2e1b12288561"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:33:32 crc kubenswrapper[4753]: I0129 12:33:32.908111 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7f7e23b6-3379-4af2-a33b-2e1b12288561-scripts" (OuterVolumeSpecName: "scripts") pod "7f7e23b6-3379-4af2-a33b-2e1b12288561" (UID: "7f7e23b6-3379-4af2-a33b-2e1b12288561"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:33:32 crc kubenswrapper[4753]: I0129 12:33:32.970172 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/440fbaaa-a825-4b7c-b5f5-6cad02f05493-etc-swift\") pod \"swift-storage-0\" (UID: \"440fbaaa-a825-4b7c-b5f5-6cad02f05493\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:33:32 crc kubenswrapper[4753]: I0129 12:33:32.970311 4753 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7f7e23b6-3379-4af2-a33b-2e1b12288561-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:33:32 crc kubenswrapper[4753]: I0129 12:33:32.970328 4753 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7f7e23b6-3379-4af2-a33b-2e1b12288561-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:33:32 crc kubenswrapper[4753]: I0129 12:33:32.970341 4753 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/7f7e23b6-3379-4af2-a33b-2e1b12288561-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 29 12:33:32 crc kubenswrapper[4753]: I0129 12:33:32.970354 4753 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/7f7e23b6-3379-4af2-a33b-2e1b12288561-swiftconf\") on node \"crc\" DevicePath \"\"" Jan 29 12:33:32 crc kubenswrapper[4753]: I0129 12:33:32.970365 4753 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/7f7e23b6-3379-4af2-a33b-2e1b12288561-ring-data-devices\") on node \"crc\" DevicePath \"\"" Jan 29 12:33:32 crc kubenswrapper[4753]: I0129 12:33:32.970379 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6gp48\" (UniqueName: \"kubernetes.io/projected/7f7e23b6-3379-4af2-a33b-2e1b12288561-kube-api-access-6gp48\") on node \"crc\" DevicePath \"\"" Jan 29 12:33:32 crc kubenswrapper[4753]: I0129 12:33:32.970396 4753 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/7f7e23b6-3379-4af2-a33b-2e1b12288561-dispersionconf\") on node \"crc\" DevicePath \"\"" Jan 29 12:33:32 crc kubenswrapper[4753]: I0129 12:33:32.975281 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/440fbaaa-a825-4b7c-b5f5-6cad02f05493-etc-swift\") pod \"swift-storage-0\" (UID: \"440fbaaa-a825-4b7c-b5f5-6cad02f05493\") " pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:33:33 crc kubenswrapper[4753]: I0129 12:33:33.140426 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:33:33 crc kubenswrapper[4753]: I0129 12:33:33.478367 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9qkh7" event={"ID":"aadb20d7-4116-47ca-9c1e-41d54eb80a6a","Type":"ContainerStarted","Data":"ff7145cf0e1ae9456b4eba1a0a6d7e74359659cb6d13bc0071e0353774472720"} Jan 29 12:33:33 crc kubenswrapper[4753]: I0129 12:33:33.488361 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-ring-rebalance-jvrtt" event={"ID":"7f7e23b6-3379-4af2-a33b-2e1b12288561","Type":"ContainerDied","Data":"a1d1e74baad32171276f9714c7ea2822f176dbdbeef1fcf774facda25ff25814"} Jan 29 12:33:33 crc kubenswrapper[4753]: I0129 12:33:33.488452 4753 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a1d1e74baad32171276f9714c7ea2822f176dbdbeef1fcf774facda25ff25814" Jan 29 12:33:33 crc kubenswrapper[4753]: I0129 12:33:33.488585 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-jvrtt" Jan 29 12:33:33 crc kubenswrapper[4753]: I0129 12:33:33.517857 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-9qkh7" podStartSLOduration=2.8136119170000002 podStartE2EDuration="9.517803611s" podCreationTimestamp="2026-01-29 12:33:24 +0000 UTC" firstStartedPulling="2026-01-29 12:33:26.365996012 +0000 UTC m=+1620.618077467" lastFinishedPulling="2026-01-29 12:33:33.070187706 +0000 UTC m=+1627.322269161" observedRunningTime="2026-01-29 12:33:33.511092698 +0000 UTC m=+1627.763174153" watchObservedRunningTime="2026-01-29 12:33:33.517803611 +0000 UTC m=+1627.769885066" Jan 29 12:33:33 crc kubenswrapper[4753]: I0129 12:33:33.641956 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-storage-0"] Jan 29 12:33:34 crc kubenswrapper[4753]: I0129 12:33:34.117685 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/701dafed-ce7f-4a89-982d-f243757d097f-etc-swift\") pod \"swift-proxy-6dc98fc8fb-98r45\" (UID: \"701dafed-ce7f-4a89-982d-f243757d097f\") " pod="swift-kuttl-tests/swift-proxy-6dc98fc8fb-98r45" Jan 29 12:33:34 crc kubenswrapper[4753]: I0129 12:33:34.123692 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/701dafed-ce7f-4a89-982d-f243757d097f-etc-swift\") pod \"swift-proxy-6dc98fc8fb-98r45\" (UID: \"701dafed-ce7f-4a89-982d-f243757d097f\") " pod="swift-kuttl-tests/swift-proxy-6dc98fc8fb-98r45" Jan 29 12:33:34 crc kubenswrapper[4753]: I0129 12:33:34.304547 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/swift-proxy-6dc98fc8fb-98r45" Jan 29 12:33:34 crc kubenswrapper[4753]: I0129 12:33:34.511834 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"440fbaaa-a825-4b7c-b5f5-6cad02f05493","Type":"ContainerStarted","Data":"c6e88c5f166e867ae6aadcf20c840040fbb1dc60adc0d0e24432e34480530106"} Jan 29 12:33:34 crc kubenswrapper[4753]: I0129 12:33:34.511882 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"440fbaaa-a825-4b7c-b5f5-6cad02f05493","Type":"ContainerStarted","Data":"bc473b86e445d2aedcc62ad3ef717571ca0e7e280c21cdabf66085756548e8e0"} Jan 29 12:33:34 crc kubenswrapper[4753]: I0129 12:33:34.852356 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-proxy-6dc98fc8fb-98r45"] Jan 29 12:33:34 crc kubenswrapper[4753]: W0129 12:33:34.857950 4753 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod701dafed_ce7f_4a89_982d_f243757d097f.slice/crio-1073f3f040f7c8bc61f1fcc62ae69bcdf732c162c51ab1864122fdee932ef251 WatchSource:0}: Error finding container 1073f3f040f7c8bc61f1fcc62ae69bcdf732c162c51ab1864122fdee932ef251: Status 404 returned error can't find the container with id 1073f3f040f7c8bc61f1fcc62ae69bcdf732c162c51ab1864122fdee932ef251 Jan 29 12:33:34 crc kubenswrapper[4753]: I0129 12:33:34.978100 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-9qkh7" Jan 29 12:33:34 crc kubenswrapper[4753]: I0129 12:33:34.978178 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-9qkh7" Jan 29 12:33:35 crc kubenswrapper[4753]: I0129 12:33:35.528441 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"440fbaaa-a825-4b7c-b5f5-6cad02f05493","Type":"ContainerStarted","Data":"e217bdffa73d03c29ee18c168cdec6a8b4e9c536de16348101bdeba622564205"} Jan 29 12:33:35 crc kubenswrapper[4753]: I0129 12:33:35.528811 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"440fbaaa-a825-4b7c-b5f5-6cad02f05493","Type":"ContainerStarted","Data":"a41389a7fe0155c69f23818035276e36fa96b77ccb336676bb2c0834dacf2966"} Jan 29 12:33:35 crc kubenswrapper[4753]: I0129 12:33:35.530259 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-6dc98fc8fb-98r45" event={"ID":"701dafed-ce7f-4a89-982d-f243757d097f","Type":"ContainerStarted","Data":"959dd00575d11c76d373a5d72eefd75b0f075ce3ca4fd9b3e55919627e89ae13"} Jan 29 12:33:35 crc kubenswrapper[4753]: I0129 12:33:35.530318 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-6dc98fc8fb-98r45" event={"ID":"701dafed-ce7f-4a89-982d-f243757d097f","Type":"ContainerStarted","Data":"1073f3f040f7c8bc61f1fcc62ae69bcdf732c162c51ab1864122fdee932ef251"} Jan 29 12:33:36 crc kubenswrapper[4753]: I0129 12:33:36.023770 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-9qkh7" podUID="aadb20d7-4116-47ca-9c1e-41d54eb80a6a" containerName="registry-server" probeResult="failure" output=< Jan 29 12:33:36 crc kubenswrapper[4753]: timeout: failed to connect service ":50051" within 1s Jan 29 12:33:36 crc kubenswrapper[4753]: > Jan 29 12:33:37 crc kubenswrapper[4753]: I0129 12:33:37.901496 4753 
scope.go:117] "RemoveContainer" containerID="00211d2562b6632acb04decda9045cbb8d3216adca69cd5bc1380ec42b8211e0" Jan 29 12:33:37 crc kubenswrapper[4753]: E0129 12:33:37.902270 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7c24x_openshift-machine-config-operator(b0310995-a7c7-47c3-ae6c-05daaaba92a6)\"" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" Jan 29 12:33:38 crc kubenswrapper[4753]: I0129 12:33:38.673529 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"440fbaaa-a825-4b7c-b5f5-6cad02f05493","Type":"ContainerStarted","Data":"a5249ab8050d2462f51d1dbfd594b60152e053e89d8c52cfbd2ef0c85b2a7d3b"} Jan 29 12:33:38 crc kubenswrapper[4753]: I0129 12:33:38.673854 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"440fbaaa-a825-4b7c-b5f5-6cad02f05493","Type":"ContainerStarted","Data":"11c13ea009f76d88d345a6564779313fa89bcd1b81422014e7b84a9eb66f971b"} Jan 29 12:33:38 crc kubenswrapper[4753]: I0129 12:33:38.673866 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"440fbaaa-a825-4b7c-b5f5-6cad02f05493","Type":"ContainerStarted","Data":"77d4d57081b842e9efb2e33c3236053740c90c43369d47da680dedcc95e4c562"} Jan 29 12:33:38 crc kubenswrapper[4753]: I0129 12:33:38.673879 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"440fbaaa-a825-4b7c-b5f5-6cad02f05493","Type":"ContainerStarted","Data":"3f287468af691c24dee7c6dcafdc234df722b2cc8996c2a8d2b114e4bc8a0670"} Jan 29 12:33:38 crc kubenswrapper[4753]: I0129 12:33:38.682364 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-6dc98fc8fb-98r45" event={"ID":"701dafed-ce7f-4a89-982d-f243757d097f","Type":"ContainerStarted","Data":"b750bffbee119f5ecce4790a0734dcf5806f4b767315476acdc2218f32f63848"} Jan 29 12:33:38 crc kubenswrapper[4753]: I0129 12:33:38.682563 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="swift-kuttl-tests/swift-proxy-6dc98fc8fb-98r45" Jan 29 12:33:38 crc kubenswrapper[4753]: I0129 12:33:38.682586 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="swift-kuttl-tests/swift-proxy-6dc98fc8fb-98r45" Jan 29 12:33:38 crc kubenswrapper[4753]: I0129 12:33:38.712643 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="swift-kuttl-tests/swift-proxy-6dc98fc8fb-98r45" podStartSLOduration=20.712615828 podStartE2EDuration="20.712615828s" podCreationTimestamp="2026-01-29 12:33:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:33:38.705797852 +0000 UTC m=+1632.957879297" watchObservedRunningTime="2026-01-29 12:33:38.712615828 +0000 UTC m=+1632.964697283" Jan 29 12:33:39 crc kubenswrapper[4753]: I0129 12:33:39.464792 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-5cvhh"] Jan 29 12:33:39 crc kubenswrapper[4753]: E0129 12:33:39.465460 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f7e23b6-3379-4af2-a33b-2e1b12288561" containerName="swift-ring-rebalance" Jan 29 12:33:39 crc kubenswrapper[4753]: I0129 
12:33:39.465477 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f7e23b6-3379-4af2-a33b-2e1b12288561" containerName="swift-ring-rebalance" Jan 29 12:33:39 crc kubenswrapper[4753]: I0129 12:33:39.465652 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="7f7e23b6-3379-4af2-a33b-2e1b12288561" containerName="swift-ring-rebalance" Jan 29 12:33:39 crc kubenswrapper[4753]: I0129 12:33:39.468620 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5cvhh" Jan 29 12:33:39 crc kubenswrapper[4753]: I0129 12:33:39.495196 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5cvhh"] Jan 29 12:33:39 crc kubenswrapper[4753]: I0129 12:33:39.661327 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/03e6ef3e-14b4-4cb6-8eff-514f4b464ed1-utilities\") pod \"community-operators-5cvhh\" (UID: \"03e6ef3e-14b4-4cb6-8eff-514f4b464ed1\") " pod="openshift-marketplace/community-operators-5cvhh" Jan 29 12:33:39 crc kubenswrapper[4753]: I0129 12:33:39.661459 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/03e6ef3e-14b4-4cb6-8eff-514f4b464ed1-catalog-content\") pod \"community-operators-5cvhh\" (UID: \"03e6ef3e-14b4-4cb6-8eff-514f4b464ed1\") " pod="openshift-marketplace/community-operators-5cvhh" Jan 29 12:33:39 crc kubenswrapper[4753]: I0129 12:33:39.661499 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d4xdc\" (UniqueName: \"kubernetes.io/projected/03e6ef3e-14b4-4cb6-8eff-514f4b464ed1-kube-api-access-d4xdc\") pod \"community-operators-5cvhh\" (UID: \"03e6ef3e-14b4-4cb6-8eff-514f4b464ed1\") " pod="openshift-marketplace/community-operators-5cvhh" Jan 29 12:33:39 crc kubenswrapper[4753]: I0129 12:33:39.700831 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"440fbaaa-a825-4b7c-b5f5-6cad02f05493","Type":"ContainerStarted","Data":"2dcdb7bdea7d2a6d5b282fce60f0c19a6fd11fd604776be594edb82ce7cd9a4c"} Jan 29 12:33:39 crc kubenswrapper[4753]: I0129 12:33:39.700890 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"440fbaaa-a825-4b7c-b5f5-6cad02f05493","Type":"ContainerStarted","Data":"38ffd345e6e19bb944c2ed10f92c7403639786482a7a0b125342c246e8133282"} Jan 29 12:33:39 crc kubenswrapper[4753]: I0129 12:33:39.762747 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/03e6ef3e-14b4-4cb6-8eff-514f4b464ed1-utilities\") pod \"community-operators-5cvhh\" (UID: \"03e6ef3e-14b4-4cb6-8eff-514f4b464ed1\") " pod="openshift-marketplace/community-operators-5cvhh" Jan 29 12:33:39 crc kubenswrapper[4753]: I0129 12:33:39.762844 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/03e6ef3e-14b4-4cb6-8eff-514f4b464ed1-catalog-content\") pod \"community-operators-5cvhh\" (UID: \"03e6ef3e-14b4-4cb6-8eff-514f4b464ed1\") " pod="openshift-marketplace/community-operators-5cvhh" Jan 29 12:33:39 crc kubenswrapper[4753]: I0129 12:33:39.762878 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d4xdc\" 
(UniqueName: \"kubernetes.io/projected/03e6ef3e-14b4-4cb6-8eff-514f4b464ed1-kube-api-access-d4xdc\") pod \"community-operators-5cvhh\" (UID: \"03e6ef3e-14b4-4cb6-8eff-514f4b464ed1\") " pod="openshift-marketplace/community-operators-5cvhh" Jan 29 12:33:39 crc kubenswrapper[4753]: I0129 12:33:39.763402 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/03e6ef3e-14b4-4cb6-8eff-514f4b464ed1-utilities\") pod \"community-operators-5cvhh\" (UID: \"03e6ef3e-14b4-4cb6-8eff-514f4b464ed1\") " pod="openshift-marketplace/community-operators-5cvhh" Jan 29 12:33:39 crc kubenswrapper[4753]: I0129 12:33:39.763454 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/03e6ef3e-14b4-4cb6-8eff-514f4b464ed1-catalog-content\") pod \"community-operators-5cvhh\" (UID: \"03e6ef3e-14b4-4cb6-8eff-514f4b464ed1\") " pod="openshift-marketplace/community-operators-5cvhh" Jan 29 12:33:39 crc kubenswrapper[4753]: I0129 12:33:39.779860 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d4xdc\" (UniqueName: \"kubernetes.io/projected/03e6ef3e-14b4-4cb6-8eff-514f4b464ed1-kube-api-access-d4xdc\") pod \"community-operators-5cvhh\" (UID: \"03e6ef3e-14b4-4cb6-8eff-514f4b464ed1\") " pod="openshift-marketplace/community-operators-5cvhh" Jan 29 12:33:39 crc kubenswrapper[4753]: I0129 12:33:39.794386 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5cvhh" Jan 29 12:33:40 crc kubenswrapper[4753]: I0129 12:33:40.300113 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5cvhh"] Jan 29 12:33:40 crc kubenswrapper[4753]: W0129 12:33:40.313573 4753 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod03e6ef3e_14b4_4cb6_8eff_514f4b464ed1.slice/crio-89d6782ddc3bd90292a12550dc0ed65c236494808915cbd46c81a5eb2731ac6d WatchSource:0}: Error finding container 89d6782ddc3bd90292a12550dc0ed65c236494808915cbd46c81a5eb2731ac6d: Status 404 returned error can't find the container with id 89d6782ddc3bd90292a12550dc0ed65c236494808915cbd46c81a5eb2731ac6d Jan 29 12:33:40 crc kubenswrapper[4753]: I0129 12:33:40.711504 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5cvhh" event={"ID":"03e6ef3e-14b4-4cb6-8eff-514f4b464ed1","Type":"ContainerStarted","Data":"89d6782ddc3bd90292a12550dc0ed65c236494808915cbd46c81a5eb2731ac6d"} Jan 29 12:33:40 crc kubenswrapper[4753]: I0129 12:33:40.721460 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"440fbaaa-a825-4b7c-b5f5-6cad02f05493","Type":"ContainerStarted","Data":"b9a8c83cc567f746d39090743df2241dbec28eadb1b87985af13a50bd159fdcf"} Jan 29 12:33:40 crc kubenswrapper[4753]: I0129 12:33:40.721520 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"440fbaaa-a825-4b7c-b5f5-6cad02f05493","Type":"ContainerStarted","Data":"738fba01dd53bc3b8267f8fea84ccec85ed8fa14f9fc31f54c1e042155152011"} Jan 29 12:33:40 crc kubenswrapper[4753]: E0129 12:33:40.862147 4753 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: 
[\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod03e6ef3e_14b4_4cb6_8eff_514f4b464ed1.slice/crio-conmon-3ed9c8cd2d7a775487c935f442e66a0d4db41bfea51913db820346be7b4b71d5.scope\": RecentStats: unable to find data in memory cache]" Jan 29 12:33:41 crc kubenswrapper[4753]: I0129 12:33:41.748322 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"440fbaaa-a825-4b7c-b5f5-6cad02f05493","Type":"ContainerStarted","Data":"2f98b86bc7ca6dd5f12c68ae372c3b08c915a79d4a412be9a0dc8912b8a2f306"} Jan 29 12:33:41 crc kubenswrapper[4753]: I0129 12:33:41.749948 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"440fbaaa-a825-4b7c-b5f5-6cad02f05493","Type":"ContainerStarted","Data":"55f17debcca873fa0e4b350e37f48eccf285bd6343d9100b65c1d6c15e327c17"} Jan 29 12:33:41 crc kubenswrapper[4753]: I0129 12:33:41.750052 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5cvhh" event={"ID":"03e6ef3e-14b4-4cb6-8eff-514f4b464ed1","Type":"ContainerDied","Data":"3ed9c8cd2d7a775487c935f442e66a0d4db41bfea51913db820346be7b4b71d5"} Jan 29 12:33:41 crc kubenswrapper[4753]: I0129 12:33:41.750045 4753 generic.go:334] "Generic (PLEG): container finished" podID="03e6ef3e-14b4-4cb6-8eff-514f4b464ed1" containerID="3ed9c8cd2d7a775487c935f442e66a0d4db41bfea51913db820346be7b4b71d5" exitCode=0 Jan 29 12:33:42 crc kubenswrapper[4753]: I0129 12:33:42.767823 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"440fbaaa-a825-4b7c-b5f5-6cad02f05493","Type":"ContainerStarted","Data":"6ed6a67656688240784161e2afe42b71bae3c0303ed38ffaa323ceac1ef5904e"} Jan 29 12:33:43 crc kubenswrapper[4753]: I0129 12:33:43.780900 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5cvhh" event={"ID":"03e6ef3e-14b4-4cb6-8eff-514f4b464ed1","Type":"ContainerStarted","Data":"f6af4b94bc866dc19aa2e18845cf2d9c8eb9767390915622a3a3f09c76dbeff7"} Jan 29 12:33:43 crc kubenswrapper[4753]: I0129 12:33:43.801169 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"440fbaaa-a825-4b7c-b5f5-6cad02f05493","Type":"ContainerStarted","Data":"900df4746b0918c8eb5088f4f6d4c404b574f9420a7a18b72696e8ecaf09f44c"} Jan 29 12:33:43 crc kubenswrapper[4753]: I0129 12:33:43.902904 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="swift-kuttl-tests/swift-storage-0" podStartSLOduration=28.902860315 podStartE2EDuration="28.902860315s" podCreationTimestamp="2026-01-29 12:33:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:33:43.889691997 +0000 UTC m=+1638.141773472" watchObservedRunningTime="2026-01-29 12:33:43.902860315 +0000 UTC m=+1638.154941770" Jan 29 12:33:44 crc kubenswrapper[4753]: I0129 12:33:44.316333 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="swift-kuttl-tests/swift-proxy-6dc98fc8fb-98r45" Jan 29 12:33:44 crc kubenswrapper[4753]: I0129 12:33:44.317120 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="swift-kuttl-tests/swift-proxy-6dc98fc8fb-98r45" Jan 29 12:33:44 crc kubenswrapper[4753]: I0129 12:33:44.811362 4753 generic.go:334] "Generic (PLEG): container finished" podID="03e6ef3e-14b4-4cb6-8eff-514f4b464ed1" 
containerID="f6af4b94bc866dc19aa2e18845cf2d9c8eb9767390915622a3a3f09c76dbeff7" exitCode=0 Jan 29 12:33:44 crc kubenswrapper[4753]: I0129 12:33:44.811432 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5cvhh" event={"ID":"03e6ef3e-14b4-4cb6-8eff-514f4b464ed1","Type":"ContainerDied","Data":"f6af4b94bc866dc19aa2e18845cf2d9c8eb9767390915622a3a3f09c76dbeff7"} Jan 29 12:33:45 crc kubenswrapper[4753]: I0129 12:33:45.025362 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-9qkh7" Jan 29 12:33:45 crc kubenswrapper[4753]: I0129 12:33:45.073650 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-9qkh7" Jan 29 12:33:45 crc kubenswrapper[4753]: I0129 12:33:45.841743 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-9qkh7"] Jan 29 12:33:45 crc kubenswrapper[4753]: I0129 12:33:45.950181 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/swift-storage-0"] Jan 29 12:33:45 crc kubenswrapper[4753]: I0129 12:33:45.950679 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="account-server" containerID="cri-o://c6e88c5f166e867ae6aadcf20c840040fbb1dc60adc0d0e24432e34480530106" gracePeriod=30 Jan 29 12:33:45 crc kubenswrapper[4753]: I0129 12:33:45.950827 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="account-replicator" containerID="cri-o://a41389a7fe0155c69f23818035276e36fa96b77ccb336676bb2c0834dacf2966" gracePeriod=30 Jan 29 12:33:45 crc kubenswrapper[4753]: I0129 12:33:45.950810 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="object-server" containerID="cri-o://2dcdb7bdea7d2a6d5b282fce60f0c19a6fd11fd604776be594edb82ce7cd9a4c" gracePeriod=30 Jan 29 12:33:45 crc kubenswrapper[4753]: I0129 12:33:45.950829 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="container-server" containerID="cri-o://3f287468af691c24dee7c6dcafdc234df722b2cc8996c2a8d2b114e4bc8a0670" gracePeriod=30 Jan 29 12:33:45 crc kubenswrapper[4753]: I0129 12:33:45.950942 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="container-replicator" containerID="cri-o://77d4d57081b842e9efb2e33c3236053740c90c43369d47da680dedcc95e4c562" gracePeriod=30 Jan 29 12:33:45 crc kubenswrapper[4753]: I0129 12:33:45.950952 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="object-replicator" containerID="cri-o://738fba01dd53bc3b8267f8fea84ccec85ed8fa14f9fc31f54c1e042155152011" gracePeriod=30 Jan 29 12:33:45 crc kubenswrapper[4753]: I0129 12:33:45.950959 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="container-auditor" 
containerID="cri-o://11c13ea009f76d88d345a6564779313fa89bcd1b81422014e7b84a9eb66f971b" gracePeriod=30 Jan 29 12:33:45 crc kubenswrapper[4753]: I0129 12:33:45.950942 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="object-updater" containerID="cri-o://55f17debcca873fa0e4b350e37f48eccf285bd6343d9100b65c1d6c15e327c17" gracePeriod=30 Jan 29 12:33:45 crc kubenswrapper[4753]: I0129 12:33:45.950880 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="swift-recon-cron" containerID="cri-o://900df4746b0918c8eb5088f4f6d4c404b574f9420a7a18b72696e8ecaf09f44c" gracePeriod=30 Jan 29 12:33:45 crc kubenswrapper[4753]: I0129 12:33:45.950943 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="account-reaper" containerID="cri-o://a5249ab8050d2462f51d1dbfd594b60152e053e89d8c52cfbd2ef0c85b2a7d3b" gracePeriod=30 Jan 29 12:33:45 crc kubenswrapper[4753]: I0129 12:33:45.951008 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="container-updater" containerID="cri-o://38ffd345e6e19bb944c2ed10f92c7403639786482a7a0b125342c246e8133282" gracePeriod=30 Jan 29 12:33:45 crc kubenswrapper[4753]: I0129 12:33:45.950873 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="rsync" containerID="cri-o://6ed6a67656688240784161e2afe42b71bae3c0303ed38ffaa323ceac1ef5904e" gracePeriod=30 Jan 29 12:33:45 crc kubenswrapper[4753]: I0129 12:33:45.950931 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="object-auditor" containerID="cri-o://b9a8c83cc567f746d39090743df2241dbec28eadb1b87985af13a50bd159fdcf" gracePeriod=30 Jan 29 12:33:45 crc kubenswrapper[4753]: I0129 12:33:45.950793 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="object-expirer" containerID="cri-o://2f98b86bc7ca6dd5f12c68ae372c3b08c915a79d4a412be9a0dc8912b8a2f306" gracePeriod=30 Jan 29 12:33:45 crc kubenswrapper[4753]: I0129 12:33:45.951312 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="account-auditor" containerID="cri-o://e217bdffa73d03c29ee18c168cdec6a8b4e9c536de16348101bdeba622564205" gracePeriod=30 Jan 29 12:33:45 crc kubenswrapper[4753]: I0129 12:33:45.965083 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-jvrtt"] Jan 29 12:33:45 crc kubenswrapper[4753]: I0129 12:33:45.985884 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-jvrtt"] Jan 29 12:33:46 crc kubenswrapper[4753]: I0129 12:33:46.024961 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/swift-proxy-6dc98fc8fb-98r45"] Jan 29 12:33:46 crc kubenswrapper[4753]: I0129 12:33:46.025259 4753 
kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-proxy-6dc98fc8fb-98r45" podUID="701dafed-ce7f-4a89-982d-f243757d097f" containerName="proxy-httpd" containerID="cri-o://959dd00575d11c76d373a5d72eefd75b0f075ce3ca4fd9b3e55919627e89ae13" gracePeriod=30 Jan 29 12:33:46 crc kubenswrapper[4753]: I0129 12:33:46.025344 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-proxy-6dc98fc8fb-98r45" podUID="701dafed-ce7f-4a89-982d-f243757d097f" containerName="proxy-server" containerID="cri-o://b750bffbee119f5ecce4790a0734dcf5806f4b767315476acdc2218f32f63848" gracePeriod=30 Jan 29 12:33:46 crc kubenswrapper[4753]: I0129 12:33:46.832456 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5cvhh" event={"ID":"03e6ef3e-14b4-4cb6-8eff-514f4b464ed1","Type":"ContainerStarted","Data":"c6e7b49e87598e4ca5111f79014f8089db21ac83078f12bd5bfe8413ff3cde6f"} Jan 29 12:33:46 crc kubenswrapper[4753]: I0129 12:33:46.857597 4753 generic.go:334] "Generic (PLEG): container finished" podID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerID="6ed6a67656688240784161e2afe42b71bae3c0303ed38ffaa323ceac1ef5904e" exitCode=0 Jan 29 12:33:46 crc kubenswrapper[4753]: I0129 12:33:46.857638 4753 generic.go:334] "Generic (PLEG): container finished" podID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerID="2f98b86bc7ca6dd5f12c68ae372c3b08c915a79d4a412be9a0dc8912b8a2f306" exitCode=0 Jan 29 12:33:46 crc kubenswrapper[4753]: I0129 12:33:46.857649 4753 generic.go:334] "Generic (PLEG): container finished" podID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerID="55f17debcca873fa0e4b350e37f48eccf285bd6343d9100b65c1d6c15e327c17" exitCode=0 Jan 29 12:33:46 crc kubenswrapper[4753]: I0129 12:33:46.857658 4753 generic.go:334] "Generic (PLEG): container finished" podID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerID="b9a8c83cc567f746d39090743df2241dbec28eadb1b87985af13a50bd159fdcf" exitCode=0 Jan 29 12:33:46 crc kubenswrapper[4753]: I0129 12:33:46.857666 4753 generic.go:334] "Generic (PLEG): container finished" podID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerID="738fba01dd53bc3b8267f8fea84ccec85ed8fa14f9fc31f54c1e042155152011" exitCode=0 Jan 29 12:33:46 crc kubenswrapper[4753]: I0129 12:33:46.857678 4753 generic.go:334] "Generic (PLEG): container finished" podID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerID="2dcdb7bdea7d2a6d5b282fce60f0c19a6fd11fd604776be594edb82ce7cd9a4c" exitCode=0 Jan 29 12:33:46 crc kubenswrapper[4753]: I0129 12:33:46.857686 4753 generic.go:334] "Generic (PLEG): container finished" podID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerID="38ffd345e6e19bb944c2ed10f92c7403639786482a7a0b125342c246e8133282" exitCode=0 Jan 29 12:33:46 crc kubenswrapper[4753]: I0129 12:33:46.857693 4753 generic.go:334] "Generic (PLEG): container finished" podID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerID="11c13ea009f76d88d345a6564779313fa89bcd1b81422014e7b84a9eb66f971b" exitCode=0 Jan 29 12:33:46 crc kubenswrapper[4753]: I0129 12:33:46.857701 4753 generic.go:334] "Generic (PLEG): container finished" podID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerID="77d4d57081b842e9efb2e33c3236053740c90c43369d47da680dedcc95e4c562" exitCode=0 Jan 29 12:33:46 crc kubenswrapper[4753]: I0129 12:33:46.857775 4753 generic.go:334] "Generic (PLEG): container finished" podID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerID="3f287468af691c24dee7c6dcafdc234df722b2cc8996c2a8d2b114e4bc8a0670" exitCode=0 Jan 
29 12:33:46 crc kubenswrapper[4753]: I0129 12:33:46.857787 4753 generic.go:334] "Generic (PLEG): container finished" podID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerID="a5249ab8050d2462f51d1dbfd594b60152e053e89d8c52cfbd2ef0c85b2a7d3b" exitCode=0 Jan 29 12:33:46 crc kubenswrapper[4753]: I0129 12:33:46.857796 4753 generic.go:334] "Generic (PLEG): container finished" podID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerID="e217bdffa73d03c29ee18c168cdec6a8b4e9c536de16348101bdeba622564205" exitCode=0 Jan 29 12:33:46 crc kubenswrapper[4753]: I0129 12:33:46.857804 4753 generic.go:334] "Generic (PLEG): container finished" podID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerID="a41389a7fe0155c69f23818035276e36fa96b77ccb336676bb2c0834dacf2966" exitCode=0 Jan 29 12:33:46 crc kubenswrapper[4753]: I0129 12:33:46.857814 4753 generic.go:334] "Generic (PLEG): container finished" podID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerID="c6e88c5f166e867ae6aadcf20c840040fbb1dc60adc0d0e24432e34480530106" exitCode=0 Jan 29 12:33:46 crc kubenswrapper[4753]: I0129 12:33:46.857908 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"440fbaaa-a825-4b7c-b5f5-6cad02f05493","Type":"ContainerDied","Data":"6ed6a67656688240784161e2afe42b71bae3c0303ed38ffaa323ceac1ef5904e"} Jan 29 12:33:46 crc kubenswrapper[4753]: I0129 12:33:46.857948 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"440fbaaa-a825-4b7c-b5f5-6cad02f05493","Type":"ContainerDied","Data":"2f98b86bc7ca6dd5f12c68ae372c3b08c915a79d4a412be9a0dc8912b8a2f306"} Jan 29 12:33:46 crc kubenswrapper[4753]: I0129 12:33:46.857964 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"440fbaaa-a825-4b7c-b5f5-6cad02f05493","Type":"ContainerDied","Data":"55f17debcca873fa0e4b350e37f48eccf285bd6343d9100b65c1d6c15e327c17"} Jan 29 12:33:46 crc kubenswrapper[4753]: I0129 12:33:46.857976 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"440fbaaa-a825-4b7c-b5f5-6cad02f05493","Type":"ContainerDied","Data":"b9a8c83cc567f746d39090743df2241dbec28eadb1b87985af13a50bd159fdcf"} Jan 29 12:33:46 crc kubenswrapper[4753]: I0129 12:33:46.858004 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"440fbaaa-a825-4b7c-b5f5-6cad02f05493","Type":"ContainerDied","Data":"738fba01dd53bc3b8267f8fea84ccec85ed8fa14f9fc31f54c1e042155152011"} Jan 29 12:33:46 crc kubenswrapper[4753]: I0129 12:33:46.858017 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"440fbaaa-a825-4b7c-b5f5-6cad02f05493","Type":"ContainerDied","Data":"2dcdb7bdea7d2a6d5b282fce60f0c19a6fd11fd604776be594edb82ce7cd9a4c"} Jan 29 12:33:46 crc kubenswrapper[4753]: I0129 12:33:46.858029 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"440fbaaa-a825-4b7c-b5f5-6cad02f05493","Type":"ContainerDied","Data":"38ffd345e6e19bb944c2ed10f92c7403639786482a7a0b125342c246e8133282"} Jan 29 12:33:46 crc kubenswrapper[4753]: I0129 12:33:46.858039 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"440fbaaa-a825-4b7c-b5f5-6cad02f05493","Type":"ContainerDied","Data":"11c13ea009f76d88d345a6564779313fa89bcd1b81422014e7b84a9eb66f971b"} Jan 29 12:33:46 crc kubenswrapper[4753]: I0129 12:33:46.858051 4753 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"440fbaaa-a825-4b7c-b5f5-6cad02f05493","Type":"ContainerDied","Data":"77d4d57081b842e9efb2e33c3236053740c90c43369d47da680dedcc95e4c562"} Jan 29 12:33:46 crc kubenswrapper[4753]: I0129 12:33:46.858061 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"440fbaaa-a825-4b7c-b5f5-6cad02f05493","Type":"ContainerDied","Data":"3f287468af691c24dee7c6dcafdc234df722b2cc8996c2a8d2b114e4bc8a0670"} Jan 29 12:33:46 crc kubenswrapper[4753]: I0129 12:33:46.858073 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"440fbaaa-a825-4b7c-b5f5-6cad02f05493","Type":"ContainerDied","Data":"a5249ab8050d2462f51d1dbfd594b60152e053e89d8c52cfbd2ef0c85b2a7d3b"} Jan 29 12:33:46 crc kubenswrapper[4753]: I0129 12:33:46.858083 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"440fbaaa-a825-4b7c-b5f5-6cad02f05493","Type":"ContainerDied","Data":"e217bdffa73d03c29ee18c168cdec6a8b4e9c536de16348101bdeba622564205"} Jan 29 12:33:46 crc kubenswrapper[4753]: I0129 12:33:46.858096 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"440fbaaa-a825-4b7c-b5f5-6cad02f05493","Type":"ContainerDied","Data":"a41389a7fe0155c69f23818035276e36fa96b77ccb336676bb2c0834dacf2966"} Jan 29 12:33:46 crc kubenswrapper[4753]: I0129 12:33:46.858108 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"440fbaaa-a825-4b7c-b5f5-6cad02f05493","Type":"ContainerDied","Data":"c6e88c5f166e867ae6aadcf20c840040fbb1dc60adc0d0e24432e34480530106"} Jan 29 12:33:46 crc kubenswrapper[4753]: I0129 12:33:46.858651 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-5cvhh" podStartSLOduration=3.344089988 podStartE2EDuration="7.8586317s" podCreationTimestamp="2026-01-29 12:33:39 +0000 UTC" firstStartedPulling="2026-01-29 12:33:41.751678115 +0000 UTC m=+1636.003759560" lastFinishedPulling="2026-01-29 12:33:46.266219817 +0000 UTC m=+1640.518301272" observedRunningTime="2026-01-29 12:33:46.858331061 +0000 UTC m=+1641.110412516" watchObservedRunningTime="2026-01-29 12:33:46.8586317 +0000 UTC m=+1641.110713155" Jan 29 12:33:46 crc kubenswrapper[4753]: I0129 12:33:46.862359 4753 generic.go:334] "Generic (PLEG): container finished" podID="701dafed-ce7f-4a89-982d-f243757d097f" containerID="b750bffbee119f5ecce4790a0734dcf5806f4b767315476acdc2218f32f63848" exitCode=0 Jan 29 12:33:46 crc kubenswrapper[4753]: I0129 12:33:46.862398 4753 generic.go:334] "Generic (PLEG): container finished" podID="701dafed-ce7f-4a89-982d-f243757d097f" containerID="959dd00575d11c76d373a5d72eefd75b0f075ce3ca4fd9b3e55919627e89ae13" exitCode=0 Jan 29 12:33:46 crc kubenswrapper[4753]: I0129 12:33:46.862457 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-6dc98fc8fb-98r45" event={"ID":"701dafed-ce7f-4a89-982d-f243757d097f","Type":"ContainerDied","Data":"b750bffbee119f5ecce4790a0734dcf5806f4b767315476acdc2218f32f63848"} Jan 29 12:33:46 crc kubenswrapper[4753]: I0129 12:33:46.862522 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-6dc98fc8fb-98r45" 
event={"ID":"701dafed-ce7f-4a89-982d-f243757d097f","Type":"ContainerDied","Data":"959dd00575d11c76d373a5d72eefd75b0f075ce3ca4fd9b3e55919627e89ae13"} Jan 29 12:33:46 crc kubenswrapper[4753]: I0129 12:33:46.862636 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-9qkh7" podUID="aadb20d7-4116-47ca-9c1e-41d54eb80a6a" containerName="registry-server" containerID="cri-o://ff7145cf0e1ae9456b4eba1a0a6d7e74359659cb6d13bc0071e0353774472720" gracePeriod=2 Jan 29 12:33:47 crc kubenswrapper[4753]: I0129 12:33:47.025502 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-proxy-6dc98fc8fb-98r45" Jan 29 12:33:47 crc kubenswrapper[4753]: I0129 12:33:47.215524 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/701dafed-ce7f-4a89-982d-f243757d097f-config-data\") pod \"701dafed-ce7f-4a89-982d-f243757d097f\" (UID: \"701dafed-ce7f-4a89-982d-f243757d097f\") " Jan 29 12:33:47 crc kubenswrapper[4753]: I0129 12:33:47.215576 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/701dafed-ce7f-4a89-982d-f243757d097f-public-tls-certs\") pod \"701dafed-ce7f-4a89-982d-f243757d097f\" (UID: \"701dafed-ce7f-4a89-982d-f243757d097f\") " Jan 29 12:33:47 crc kubenswrapper[4753]: I0129 12:33:47.215668 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z9sdn\" (UniqueName: \"kubernetes.io/projected/701dafed-ce7f-4a89-982d-f243757d097f-kube-api-access-z9sdn\") pod \"701dafed-ce7f-4a89-982d-f243757d097f\" (UID: \"701dafed-ce7f-4a89-982d-f243757d097f\") " Jan 29 12:33:47 crc kubenswrapper[4753]: I0129 12:33:47.215715 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/701dafed-ce7f-4a89-982d-f243757d097f-log-httpd\") pod \"701dafed-ce7f-4a89-982d-f243757d097f\" (UID: \"701dafed-ce7f-4a89-982d-f243757d097f\") " Jan 29 12:33:47 crc kubenswrapper[4753]: I0129 12:33:47.215754 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/701dafed-ce7f-4a89-982d-f243757d097f-etc-swift\") pod \"701dafed-ce7f-4a89-982d-f243757d097f\" (UID: \"701dafed-ce7f-4a89-982d-f243757d097f\") " Jan 29 12:33:47 crc kubenswrapper[4753]: I0129 12:33:47.215812 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/701dafed-ce7f-4a89-982d-f243757d097f-internal-tls-certs\") pod \"701dafed-ce7f-4a89-982d-f243757d097f\" (UID: \"701dafed-ce7f-4a89-982d-f243757d097f\") " Jan 29 12:33:47 crc kubenswrapper[4753]: I0129 12:33:47.215830 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/701dafed-ce7f-4a89-982d-f243757d097f-run-httpd\") pod \"701dafed-ce7f-4a89-982d-f243757d097f\" (UID: \"701dafed-ce7f-4a89-982d-f243757d097f\") " Jan 29 12:33:47 crc kubenswrapper[4753]: I0129 12:33:47.215852 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/701dafed-ce7f-4a89-982d-f243757d097f-combined-ca-bundle\") pod \"701dafed-ce7f-4a89-982d-f243757d097f\" (UID: \"701dafed-ce7f-4a89-982d-f243757d097f\") " Jan 29 12:33:47 
crc kubenswrapper[4753]: I0129 12:33:47.221121 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/701dafed-ce7f-4a89-982d-f243757d097f-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "701dafed-ce7f-4a89-982d-f243757d097f" (UID: "701dafed-ce7f-4a89-982d-f243757d097f"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:33:47 crc kubenswrapper[4753]: I0129 12:33:47.221158 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/701dafed-ce7f-4a89-982d-f243757d097f-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "701dafed-ce7f-4a89-982d-f243757d097f" (UID: "701dafed-ce7f-4a89-982d-f243757d097f"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:33:47 crc kubenswrapper[4753]: I0129 12:33:47.236874 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/701dafed-ce7f-4a89-982d-f243757d097f-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "701dafed-ce7f-4a89-982d-f243757d097f" (UID: "701dafed-ce7f-4a89-982d-f243757d097f"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:33:47 crc kubenswrapper[4753]: I0129 12:33:47.237288 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/701dafed-ce7f-4a89-982d-f243757d097f-kube-api-access-z9sdn" (OuterVolumeSpecName: "kube-api-access-z9sdn") pod "701dafed-ce7f-4a89-982d-f243757d097f" (UID: "701dafed-ce7f-4a89-982d-f243757d097f"). InnerVolumeSpecName "kube-api-access-z9sdn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:33:47 crc kubenswrapper[4753]: I0129 12:33:47.274993 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/701dafed-ce7f-4a89-982d-f243757d097f-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "701dafed-ce7f-4a89-982d-f243757d097f" (UID: "701dafed-ce7f-4a89-982d-f243757d097f"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:33:47 crc kubenswrapper[4753]: I0129 12:33:47.283448 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/701dafed-ce7f-4a89-982d-f243757d097f-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "701dafed-ce7f-4a89-982d-f243757d097f" (UID: "701dafed-ce7f-4a89-982d-f243757d097f"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:33:47 crc kubenswrapper[4753]: I0129 12:33:47.287446 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/701dafed-ce7f-4a89-982d-f243757d097f-config-data" (OuterVolumeSpecName: "config-data") pod "701dafed-ce7f-4a89-982d-f243757d097f" (UID: "701dafed-ce7f-4a89-982d-f243757d097f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:33:47 crc kubenswrapper[4753]: I0129 12:33:47.300551 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/701dafed-ce7f-4a89-982d-f243757d097f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "701dafed-ce7f-4a89-982d-f243757d097f" (UID: "701dafed-ce7f-4a89-982d-f243757d097f"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:33:47 crc kubenswrapper[4753]: I0129 12:33:47.317000 4753 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/701dafed-ce7f-4a89-982d-f243757d097f-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 12:33:47 crc kubenswrapper[4753]: I0129 12:33:47.317481 4753 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/701dafed-ce7f-4a89-982d-f243757d097f-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 12:33:47 crc kubenswrapper[4753]: I0129 12:33:47.318219 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z9sdn\" (UniqueName: \"kubernetes.io/projected/701dafed-ce7f-4a89-982d-f243757d097f-kube-api-access-z9sdn\") on node \"crc\" DevicePath \"\"" Jan 29 12:33:47 crc kubenswrapper[4753]: I0129 12:33:47.318262 4753 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/701dafed-ce7f-4a89-982d-f243757d097f-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 12:33:47 crc kubenswrapper[4753]: I0129 12:33:47.318276 4753 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/701dafed-ce7f-4a89-982d-f243757d097f-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 29 12:33:47 crc kubenswrapper[4753]: I0129 12:33:47.318289 4753 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/701dafed-ce7f-4a89-982d-f243757d097f-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 12:33:47 crc kubenswrapper[4753]: I0129 12:33:47.318299 4753 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/701dafed-ce7f-4a89-982d-f243757d097f-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 12:33:47 crc kubenswrapper[4753]: I0129 12:33:47.318311 4753 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/701dafed-ce7f-4a89-982d-f243757d097f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:33:47 crc kubenswrapper[4753]: I0129 12:33:47.385259 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-9qkh7" Jan 29 12:33:47 crc kubenswrapper[4753]: I0129 12:33:47.524656 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-78qhd\" (UniqueName: \"kubernetes.io/projected/aadb20d7-4116-47ca-9c1e-41d54eb80a6a-kube-api-access-78qhd\") pod \"aadb20d7-4116-47ca-9c1e-41d54eb80a6a\" (UID: \"aadb20d7-4116-47ca-9c1e-41d54eb80a6a\") " Jan 29 12:33:47 crc kubenswrapper[4753]: I0129 12:33:47.524704 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aadb20d7-4116-47ca-9c1e-41d54eb80a6a-utilities\") pod \"aadb20d7-4116-47ca-9c1e-41d54eb80a6a\" (UID: \"aadb20d7-4116-47ca-9c1e-41d54eb80a6a\") " Jan 29 12:33:47 crc kubenswrapper[4753]: I0129 12:33:47.525503 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aadb20d7-4116-47ca-9c1e-41d54eb80a6a-catalog-content\") pod \"aadb20d7-4116-47ca-9c1e-41d54eb80a6a\" (UID: \"aadb20d7-4116-47ca-9c1e-41d54eb80a6a\") " Jan 29 12:33:47 crc kubenswrapper[4753]: I0129 12:33:47.527447 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/aadb20d7-4116-47ca-9c1e-41d54eb80a6a-utilities" (OuterVolumeSpecName: "utilities") pod "aadb20d7-4116-47ca-9c1e-41d54eb80a6a" (UID: "aadb20d7-4116-47ca-9c1e-41d54eb80a6a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:33:47 crc kubenswrapper[4753]: I0129 12:33:47.539578 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aadb20d7-4116-47ca-9c1e-41d54eb80a6a-kube-api-access-78qhd" (OuterVolumeSpecName: "kube-api-access-78qhd") pod "aadb20d7-4116-47ca-9c1e-41d54eb80a6a" (UID: "aadb20d7-4116-47ca-9c1e-41d54eb80a6a"). InnerVolumeSpecName "kube-api-access-78qhd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:33:47 crc kubenswrapper[4753]: I0129 12:33:47.602773 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/aadb20d7-4116-47ca-9c1e-41d54eb80a6a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "aadb20d7-4116-47ca-9c1e-41d54eb80a6a" (UID: "aadb20d7-4116-47ca-9c1e-41d54eb80a6a"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:33:47 crc kubenswrapper[4753]: I0129 12:33:47.628486 4753 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aadb20d7-4116-47ca-9c1e-41d54eb80a6a-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 12:33:47 crc kubenswrapper[4753]: I0129 12:33:47.628544 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-78qhd\" (UniqueName: \"kubernetes.io/projected/aadb20d7-4116-47ca-9c1e-41d54eb80a6a-kube-api-access-78qhd\") on node \"crc\" DevicePath \"\"" Jan 29 12:33:47 crc kubenswrapper[4753]: I0129 12:33:47.628557 4753 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aadb20d7-4116-47ca-9c1e-41d54eb80a6a-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 12:33:47 crc kubenswrapper[4753]: I0129 12:33:47.876036 4753 generic.go:334] "Generic (PLEG): container finished" podID="aadb20d7-4116-47ca-9c1e-41d54eb80a6a" containerID="ff7145cf0e1ae9456b4eba1a0a6d7e74359659cb6d13bc0071e0353774472720" exitCode=0 Jan 29 12:33:47 crc kubenswrapper[4753]: I0129 12:33:47.876137 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9qkh7" Jan 29 12:33:47 crc kubenswrapper[4753]: I0129 12:33:47.876099 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9qkh7" event={"ID":"aadb20d7-4116-47ca-9c1e-41d54eb80a6a","Type":"ContainerDied","Data":"ff7145cf0e1ae9456b4eba1a0a6d7e74359659cb6d13bc0071e0353774472720"} Jan 29 12:33:47 crc kubenswrapper[4753]: I0129 12:33:47.876289 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9qkh7" event={"ID":"aadb20d7-4116-47ca-9c1e-41d54eb80a6a","Type":"ContainerDied","Data":"5b0ebbf3a1a9c8a03bdac492b3b811fdfcfed4c2f43aeb9bdf8759ef2d4e2e9e"} Jan 29 12:33:47 crc kubenswrapper[4753]: I0129 12:33:47.876371 4753 scope.go:117] "RemoveContainer" containerID="ff7145cf0e1ae9456b4eba1a0a6d7e74359659cb6d13bc0071e0353774472720" Jan 29 12:33:47 crc kubenswrapper[4753]: I0129 12:33:47.879321 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/swift-proxy-6dc98fc8fb-98r45" Jan 29 12:33:47 crc kubenswrapper[4753]: I0129 12:33:47.879920 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-6dc98fc8fb-98r45" event={"ID":"701dafed-ce7f-4a89-982d-f243757d097f","Type":"ContainerDied","Data":"1073f3f040f7c8bc61f1fcc62ae69bcdf732c162c51ab1864122fdee932ef251"} Jan 29 12:33:47 crc kubenswrapper[4753]: I0129 12:33:47.898038 4753 scope.go:117] "RemoveContainer" containerID="a775f37947eddea4396e931b71b48ce427539ca47e8ef8512a625c2ff7fcf4ed" Jan 29 12:33:47 crc kubenswrapper[4753]: I0129 12:33:47.904965 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7f7e23b6-3379-4af2-a33b-2e1b12288561" path="/var/lib/kubelet/pods/7f7e23b6-3379-4af2-a33b-2e1b12288561/volumes" Jan 29 12:33:47 crc kubenswrapper[4753]: I0129 12:33:47.923292 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/swift-proxy-6dc98fc8fb-98r45"] Jan 29 12:33:47 crc kubenswrapper[4753]: I0129 12:33:47.924188 4753 scope.go:117] "RemoveContainer" containerID="6980bac95a3044928f273e9646567a1f6eb86db4dcf6dfd1f524cf97ad0bc582" Jan 29 12:33:47 crc kubenswrapper[4753]: I0129 12:33:47.936263 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/swift-proxy-6dc98fc8fb-98r45"] Jan 29 12:33:47 crc kubenswrapper[4753]: I0129 12:33:47.951009 4753 scope.go:117] "RemoveContainer" containerID="ff7145cf0e1ae9456b4eba1a0a6d7e74359659cb6d13bc0071e0353774472720" Jan 29 12:33:47 crc kubenswrapper[4753]: I0129 12:33:47.953053 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-9qkh7"] Jan 29 12:33:47 crc kubenswrapper[4753]: E0129 12:33:47.956430 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ff7145cf0e1ae9456b4eba1a0a6d7e74359659cb6d13bc0071e0353774472720\": container with ID starting with ff7145cf0e1ae9456b4eba1a0a6d7e74359659cb6d13bc0071e0353774472720 not found: ID does not exist" containerID="ff7145cf0e1ae9456b4eba1a0a6d7e74359659cb6d13bc0071e0353774472720" Jan 29 12:33:47 crc kubenswrapper[4753]: I0129 12:33:47.956485 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff7145cf0e1ae9456b4eba1a0a6d7e74359659cb6d13bc0071e0353774472720"} err="failed to get container status \"ff7145cf0e1ae9456b4eba1a0a6d7e74359659cb6d13bc0071e0353774472720\": rpc error: code = NotFound desc = could not find container \"ff7145cf0e1ae9456b4eba1a0a6d7e74359659cb6d13bc0071e0353774472720\": container with ID starting with ff7145cf0e1ae9456b4eba1a0a6d7e74359659cb6d13bc0071e0353774472720 not found: ID does not exist" Jan 29 12:33:47 crc kubenswrapper[4753]: I0129 12:33:47.956515 4753 scope.go:117] "RemoveContainer" containerID="a775f37947eddea4396e931b71b48ce427539ca47e8ef8512a625c2ff7fcf4ed" Jan 29 12:33:47 crc kubenswrapper[4753]: I0129 12:33:47.956839 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-9qkh7"] Jan 29 12:33:47 crc kubenswrapper[4753]: E0129 12:33:47.957058 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a775f37947eddea4396e931b71b48ce427539ca47e8ef8512a625c2ff7fcf4ed\": container with ID starting with a775f37947eddea4396e931b71b48ce427539ca47e8ef8512a625c2ff7fcf4ed not found: ID does not exist" 
containerID="a775f37947eddea4396e931b71b48ce427539ca47e8ef8512a625c2ff7fcf4ed" Jan 29 12:33:47 crc kubenswrapper[4753]: I0129 12:33:47.957113 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a775f37947eddea4396e931b71b48ce427539ca47e8ef8512a625c2ff7fcf4ed"} err="failed to get container status \"a775f37947eddea4396e931b71b48ce427539ca47e8ef8512a625c2ff7fcf4ed\": rpc error: code = NotFound desc = could not find container \"a775f37947eddea4396e931b71b48ce427539ca47e8ef8512a625c2ff7fcf4ed\": container with ID starting with a775f37947eddea4396e931b71b48ce427539ca47e8ef8512a625c2ff7fcf4ed not found: ID does not exist" Jan 29 12:33:47 crc kubenswrapper[4753]: I0129 12:33:47.957152 4753 scope.go:117] "RemoveContainer" containerID="6980bac95a3044928f273e9646567a1f6eb86db4dcf6dfd1f524cf97ad0bc582" Jan 29 12:33:47 crc kubenswrapper[4753]: E0129 12:33:47.957708 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6980bac95a3044928f273e9646567a1f6eb86db4dcf6dfd1f524cf97ad0bc582\": container with ID starting with 6980bac95a3044928f273e9646567a1f6eb86db4dcf6dfd1f524cf97ad0bc582 not found: ID does not exist" containerID="6980bac95a3044928f273e9646567a1f6eb86db4dcf6dfd1f524cf97ad0bc582" Jan 29 12:33:47 crc kubenswrapper[4753]: I0129 12:33:47.957746 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6980bac95a3044928f273e9646567a1f6eb86db4dcf6dfd1f524cf97ad0bc582"} err="failed to get container status \"6980bac95a3044928f273e9646567a1f6eb86db4dcf6dfd1f524cf97ad0bc582\": rpc error: code = NotFound desc = could not find container \"6980bac95a3044928f273e9646567a1f6eb86db4dcf6dfd1f524cf97ad0bc582\": container with ID starting with 6980bac95a3044928f273e9646567a1f6eb86db4dcf6dfd1f524cf97ad0bc582 not found: ID does not exist" Jan 29 12:33:47 crc kubenswrapper[4753]: I0129 12:33:47.957776 4753 scope.go:117] "RemoveContainer" containerID="b750bffbee119f5ecce4790a0734dcf5806f4b767315476acdc2218f32f63848" Jan 29 12:33:47 crc kubenswrapper[4753]: I0129 12:33:47.998173 4753 scope.go:117] "RemoveContainer" containerID="959dd00575d11c76d373a5d72eefd75b0f075ce3ca4fd9b3e55919627e89ae13" Jan 29 12:33:49 crc kubenswrapper[4753]: I0129 12:33:49.795306 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-5cvhh" Jan 29 12:33:49 crc kubenswrapper[4753]: I0129 12:33:49.795716 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-5cvhh" Jan 29 12:33:49 crc kubenswrapper[4753]: I0129 12:33:49.842455 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-5cvhh" Jan 29 12:33:49 crc kubenswrapper[4753]: I0129 12:33:49.913324 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="701dafed-ce7f-4a89-982d-f243757d097f" path="/var/lib/kubelet/pods/701dafed-ce7f-4a89-982d-f243757d097f/volumes" Jan 29 12:33:49 crc kubenswrapper[4753]: I0129 12:33:49.914133 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aadb20d7-4116-47ca-9c1e-41d54eb80a6a" path="/var/lib/kubelet/pods/aadb20d7-4116-47ca-9c1e-41d54eb80a6a/volumes" Jan 29 12:33:50 crc kubenswrapper[4753]: I0129 12:33:50.888344 4753 scope.go:117] "RemoveContainer" containerID="00211d2562b6632acb04decda9045cbb8d3216adca69cd5bc1380ec42b8211e0" Jan 29 12:33:50 crc 
kubenswrapper[4753]: E0129 12:33:50.888740 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7c24x_openshift-machine-config-operator(b0310995-a7c7-47c3-ae6c-05daaaba92a6)\"" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" Jan 29 12:33:59 crc kubenswrapper[4753]: I0129 12:33:59.855067 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-5cvhh" Jan 29 12:33:59 crc kubenswrapper[4753]: I0129 12:33:59.938677 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-5cvhh"] Jan 29 12:34:00 crc kubenswrapper[4753]: I0129 12:34:00.007214 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-5cvhh" podUID="03e6ef3e-14b4-4cb6-8eff-514f4b464ed1" containerName="registry-server" containerID="cri-o://c6e7b49e87598e4ca5111f79014f8089db21ac83078f12bd5bfe8413ff3cde6f" gracePeriod=2 Jan 29 12:34:01 crc kubenswrapper[4753]: I0129 12:34:01.021322 4753 generic.go:334] "Generic (PLEG): container finished" podID="03e6ef3e-14b4-4cb6-8eff-514f4b464ed1" containerID="c6e7b49e87598e4ca5111f79014f8089db21ac83078f12bd5bfe8413ff3cde6f" exitCode=0 Jan 29 12:34:01 crc kubenswrapper[4753]: I0129 12:34:01.021382 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5cvhh" event={"ID":"03e6ef3e-14b4-4cb6-8eff-514f4b464ed1","Type":"ContainerDied","Data":"c6e7b49e87598e4ca5111f79014f8089db21ac83078f12bd5bfe8413ff3cde6f"} Jan 29 12:34:01 crc kubenswrapper[4753]: I0129 12:34:01.559344 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5cvhh" Jan 29 12:34:01 crc kubenswrapper[4753]: I0129 12:34:01.616509 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/03e6ef3e-14b4-4cb6-8eff-514f4b464ed1-utilities\") pod \"03e6ef3e-14b4-4cb6-8eff-514f4b464ed1\" (UID: \"03e6ef3e-14b4-4cb6-8eff-514f4b464ed1\") " Jan 29 12:34:01 crc kubenswrapper[4753]: I0129 12:34:01.616647 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/03e6ef3e-14b4-4cb6-8eff-514f4b464ed1-catalog-content\") pod \"03e6ef3e-14b4-4cb6-8eff-514f4b464ed1\" (UID: \"03e6ef3e-14b4-4cb6-8eff-514f4b464ed1\") " Jan 29 12:34:01 crc kubenswrapper[4753]: I0129 12:34:01.616697 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4xdc\" (UniqueName: \"kubernetes.io/projected/03e6ef3e-14b4-4cb6-8eff-514f4b464ed1-kube-api-access-d4xdc\") pod \"03e6ef3e-14b4-4cb6-8eff-514f4b464ed1\" (UID: \"03e6ef3e-14b4-4cb6-8eff-514f4b464ed1\") " Jan 29 12:34:01 crc kubenswrapper[4753]: I0129 12:34:01.617970 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/03e6ef3e-14b4-4cb6-8eff-514f4b464ed1-utilities" (OuterVolumeSpecName: "utilities") pod "03e6ef3e-14b4-4cb6-8eff-514f4b464ed1" (UID: "03e6ef3e-14b4-4cb6-8eff-514f4b464ed1"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:34:01 crc kubenswrapper[4753]: I0129 12:34:01.625393 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/03e6ef3e-14b4-4cb6-8eff-514f4b464ed1-kube-api-access-d4xdc" (OuterVolumeSpecName: "kube-api-access-d4xdc") pod "03e6ef3e-14b4-4cb6-8eff-514f4b464ed1" (UID: "03e6ef3e-14b4-4cb6-8eff-514f4b464ed1"). InnerVolumeSpecName "kube-api-access-d4xdc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:34:01 crc kubenswrapper[4753]: I0129 12:34:01.670454 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/03e6ef3e-14b4-4cb6-8eff-514f4b464ed1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "03e6ef3e-14b4-4cb6-8eff-514f4b464ed1" (UID: "03e6ef3e-14b4-4cb6-8eff-514f4b464ed1"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:34:01 crc kubenswrapper[4753]: I0129 12:34:01.718422 4753 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/03e6ef3e-14b4-4cb6-8eff-514f4b464ed1-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:01 crc kubenswrapper[4753]: I0129 12:34:01.718460 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4xdc\" (UniqueName: \"kubernetes.io/projected/03e6ef3e-14b4-4cb6-8eff-514f4b464ed1-kube-api-access-d4xdc\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:01 crc kubenswrapper[4753]: I0129 12:34:01.718476 4753 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/03e6ef3e-14b4-4cb6-8eff-514f4b464ed1-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:02 crc kubenswrapper[4753]: I0129 12:34:02.041915 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5cvhh" event={"ID":"03e6ef3e-14b4-4cb6-8eff-514f4b464ed1","Type":"ContainerDied","Data":"89d6782ddc3bd90292a12550dc0ed65c236494808915cbd46c81a5eb2731ac6d"} Jan 29 12:34:02 crc kubenswrapper[4753]: I0129 12:34:02.042520 4753 scope.go:117] "RemoveContainer" containerID="c6e7b49e87598e4ca5111f79014f8089db21ac83078f12bd5bfe8413ff3cde6f" Jan 29 12:34:02 crc kubenswrapper[4753]: I0129 12:34:02.042170 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-5cvhh" Jan 29 12:34:02 crc kubenswrapper[4753]: I0129 12:34:02.072309 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-5cvhh"] Jan 29 12:34:02 crc kubenswrapper[4753]: I0129 12:34:02.073574 4753 scope.go:117] "RemoveContainer" containerID="f6af4b94bc866dc19aa2e18845cf2d9c8eb9767390915622a3a3f09c76dbeff7" Jan 29 12:34:02 crc kubenswrapper[4753]: I0129 12:34:02.077669 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-5cvhh"] Jan 29 12:34:02 crc kubenswrapper[4753]: I0129 12:34:02.100021 4753 scope.go:117] "RemoveContainer" containerID="3ed9c8cd2d7a775487c935f442e66a0d4db41bfea51913db820346be7b4b71d5" Jan 29 12:34:03 crc kubenswrapper[4753]: I0129 12:34:03.903444 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="03e6ef3e-14b4-4cb6-8eff-514f4b464ed1" path="/var/lib/kubelet/pods/03e6ef3e-14b4-4cb6-8eff-514f4b464ed1/volumes" Jan 29 12:34:05 crc kubenswrapper[4753]: I0129 12:34:05.889133 4753 scope.go:117] "RemoveContainer" containerID="00211d2562b6632acb04decda9045cbb8d3216adca69cd5bc1380ec42b8211e0" Jan 29 12:34:05 crc kubenswrapper[4753]: E0129 12:34:05.890196 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7c24x_openshift-machine-config-operator(b0310995-a7c7-47c3-ae6c-05daaaba92a6)\"" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" Jan 29 12:34:16 crc kubenswrapper[4753]: I0129 12:34:16.609085 4753 generic.go:334] "Generic (PLEG): container finished" podID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerID="900df4746b0918c8eb5088f4f6d4c404b574f9420a7a18b72696e8ecaf09f44c" exitCode=137 Jan 29 12:34:16 crc kubenswrapper[4753]: I0129 12:34:16.609344 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"440fbaaa-a825-4b7c-b5f5-6cad02f05493","Type":"ContainerDied","Data":"900df4746b0918c8eb5088f4f6d4c404b574f9420a7a18b72696e8ecaf09f44c"} Jan 29 12:34:16 crc kubenswrapper[4753]: I0129 12:34:16.889702 4753 scope.go:117] "RemoveContainer" containerID="00211d2562b6632acb04decda9045cbb8d3216adca69cd5bc1380ec42b8211e0" Jan 29 12:34:16 crc kubenswrapper[4753]: E0129 12:34:16.890444 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7c24x_openshift-machine-config-operator(b0310995-a7c7-47c3-ae6c-05daaaba92a6)\"" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" Jan 29 12:34:17 crc kubenswrapper[4753]: I0129 12:34:17.398337 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:34:17 crc kubenswrapper[4753]: I0129 12:34:17.425595 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/440fbaaa-a825-4b7c-b5f5-6cad02f05493-etc-swift\") pod \"440fbaaa-a825-4b7c-b5f5-6cad02f05493\" (UID: \"440fbaaa-a825-4b7c-b5f5-6cad02f05493\") " Jan 29 12:34:17 crc kubenswrapper[4753]: I0129 12:34:17.425698 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/440fbaaa-a825-4b7c-b5f5-6cad02f05493-cache\") pod \"440fbaaa-a825-4b7c-b5f5-6cad02f05493\" (UID: \"440fbaaa-a825-4b7c-b5f5-6cad02f05493\") " Jan 29 12:34:17 crc kubenswrapper[4753]: I0129 12:34:17.425721 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/440fbaaa-a825-4b7c-b5f5-6cad02f05493-combined-ca-bundle\") pod \"440fbaaa-a825-4b7c-b5f5-6cad02f05493\" (UID: \"440fbaaa-a825-4b7c-b5f5-6cad02f05493\") " Jan 29 12:34:17 crc kubenswrapper[4753]: I0129 12:34:17.425961 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swift\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"440fbaaa-a825-4b7c-b5f5-6cad02f05493\" (UID: \"440fbaaa-a825-4b7c-b5f5-6cad02f05493\") " Jan 29 12:34:17 crc kubenswrapper[4753]: I0129 12:34:17.426029 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/440fbaaa-a825-4b7c-b5f5-6cad02f05493-lock\") pod \"440fbaaa-a825-4b7c-b5f5-6cad02f05493\" (UID: \"440fbaaa-a825-4b7c-b5f5-6cad02f05493\") " Jan 29 12:34:17 crc kubenswrapper[4753]: I0129 12:34:17.426089 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2wrvw\" (UniqueName: \"kubernetes.io/projected/440fbaaa-a825-4b7c-b5f5-6cad02f05493-kube-api-access-2wrvw\") pod \"440fbaaa-a825-4b7c-b5f5-6cad02f05493\" (UID: \"440fbaaa-a825-4b7c-b5f5-6cad02f05493\") " Jan 29 12:34:17 crc kubenswrapper[4753]: I0129 12:34:17.426499 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/440fbaaa-a825-4b7c-b5f5-6cad02f05493-cache" (OuterVolumeSpecName: "cache") pod "440fbaaa-a825-4b7c-b5f5-6cad02f05493" (UID: "440fbaaa-a825-4b7c-b5f5-6cad02f05493"). InnerVolumeSpecName "cache". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:34:17 crc kubenswrapper[4753]: I0129 12:34:17.427020 4753 reconciler_common.go:293] "Volume detached for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/440fbaaa-a825-4b7c-b5f5-6cad02f05493-cache\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:17 crc kubenswrapper[4753]: I0129 12:34:17.427384 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/440fbaaa-a825-4b7c-b5f5-6cad02f05493-lock" (OuterVolumeSpecName: "lock") pod "440fbaaa-a825-4b7c-b5f5-6cad02f05493" (UID: "440fbaaa-a825-4b7c-b5f5-6cad02f05493"). InnerVolumeSpecName "lock". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:34:17 crc kubenswrapper[4753]: I0129 12:34:17.438631 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage07-crc" (OuterVolumeSpecName: "swift") pod "440fbaaa-a825-4b7c-b5f5-6cad02f05493" (UID: "440fbaaa-a825-4b7c-b5f5-6cad02f05493"). InnerVolumeSpecName "local-storage07-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 29 12:34:17 crc kubenswrapper[4753]: I0129 12:34:17.439645 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/440fbaaa-a825-4b7c-b5f5-6cad02f05493-kube-api-access-2wrvw" (OuterVolumeSpecName: "kube-api-access-2wrvw") pod "440fbaaa-a825-4b7c-b5f5-6cad02f05493" (UID: "440fbaaa-a825-4b7c-b5f5-6cad02f05493"). InnerVolumeSpecName "kube-api-access-2wrvw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:34:17 crc kubenswrapper[4753]: I0129 12:34:17.439921 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/440fbaaa-a825-4b7c-b5f5-6cad02f05493-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "440fbaaa-a825-4b7c-b5f5-6cad02f05493" (UID: "440fbaaa-a825-4b7c-b5f5-6cad02f05493"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:34:17 crc kubenswrapper[4753]: I0129 12:34:17.530008 4753 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/440fbaaa-a825-4b7c-b5f5-6cad02f05493-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:17 crc kubenswrapper[4753]: I0129 12:34:17.530345 4753 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" " Jan 29 12:34:17 crc kubenswrapper[4753]: I0129 12:34:17.530444 4753 reconciler_common.go:293] "Volume detached for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/440fbaaa-a825-4b7c-b5f5-6cad02f05493-lock\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:17 crc kubenswrapper[4753]: I0129 12:34:17.530510 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2wrvw\" (UniqueName: \"kubernetes.io/projected/440fbaaa-a825-4b7c-b5f5-6cad02f05493-kube-api-access-2wrvw\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:17 crc kubenswrapper[4753]: I0129 12:34:17.551291 4753 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage07-crc" (UniqueName: "kubernetes.io/local-volume/local-storage07-crc") on node "crc" Jan 29 12:34:17 crc kubenswrapper[4753]: I0129 12:34:17.627418 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"440fbaaa-a825-4b7c-b5f5-6cad02f05493","Type":"ContainerDied","Data":"bc473b86e445d2aedcc62ad3ef717571ca0e7e280c21cdabf66085756548e8e0"} Jan 29 12:34:17 crc kubenswrapper[4753]: I0129 12:34:17.627505 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/swift-storage-0" Jan 29 12:34:17 crc kubenswrapper[4753]: I0129 12:34:17.627532 4753 scope.go:117] "RemoveContainer" containerID="900df4746b0918c8eb5088f4f6d4c404b574f9420a7a18b72696e8ecaf09f44c" Jan 29 12:34:17 crc kubenswrapper[4753]: I0129 12:34:17.631973 4753 reconciler_common.go:293] "Volume detached for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:17 crc kubenswrapper[4753]: I0129 12:34:17.661194 4753 scope.go:117] "RemoveContainer" containerID="6ed6a67656688240784161e2afe42b71bae3c0303ed38ffaa323ceac1ef5904e" Jan 29 12:34:17 crc kubenswrapper[4753]: I0129 12:34:17.685964 4753 scope.go:117] "RemoveContainer" containerID="2f98b86bc7ca6dd5f12c68ae372c3b08c915a79d4a412be9a0dc8912b8a2f306" Jan 29 12:34:17 crc kubenswrapper[4753]: I0129 12:34:17.704359 4753 scope.go:117] "RemoveContainer" containerID="55f17debcca873fa0e4b350e37f48eccf285bd6343d9100b65c1d6c15e327c17" Jan 29 12:34:17 crc kubenswrapper[4753]: I0129 12:34:17.725207 4753 scope.go:117] "RemoveContainer" containerID="b9a8c83cc567f746d39090743df2241dbec28eadb1b87985af13a50bd159fdcf" Jan 29 12:34:17 crc kubenswrapper[4753]: I0129 12:34:17.744580 4753 scope.go:117] "RemoveContainer" containerID="738fba01dd53bc3b8267f8fea84ccec85ed8fa14f9fc31f54c1e042155152011" Jan 29 12:34:17 crc kubenswrapper[4753]: I0129 12:34:17.753159 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/440fbaaa-a825-4b7c-b5f5-6cad02f05493-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "440fbaaa-a825-4b7c-b5f5-6cad02f05493" (UID: "440fbaaa-a825-4b7c-b5f5-6cad02f05493"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:34:17 crc kubenswrapper[4753]: I0129 12:34:17.765581 4753 scope.go:117] "RemoveContainer" containerID="2dcdb7bdea7d2a6d5b282fce60f0c19a6fd11fd604776be594edb82ce7cd9a4c" Jan 29 12:34:17 crc kubenswrapper[4753]: I0129 12:34:17.784645 4753 scope.go:117] "RemoveContainer" containerID="38ffd345e6e19bb944c2ed10f92c7403639786482a7a0b125342c246e8133282" Jan 29 12:34:17 crc kubenswrapper[4753]: I0129 12:34:17.801138 4753 scope.go:117] "RemoveContainer" containerID="11c13ea009f76d88d345a6564779313fa89bcd1b81422014e7b84a9eb66f971b" Jan 29 12:34:17 crc kubenswrapper[4753]: I0129 12:34:17.819644 4753 scope.go:117] "RemoveContainer" containerID="77d4d57081b842e9efb2e33c3236053740c90c43369d47da680dedcc95e4c562" Jan 29 12:34:17 crc kubenswrapper[4753]: I0129 12:34:17.834656 4753 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/440fbaaa-a825-4b7c-b5f5-6cad02f05493-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:17 crc kubenswrapper[4753]: I0129 12:34:17.836569 4753 scope.go:117] "RemoveContainer" containerID="3f287468af691c24dee7c6dcafdc234df722b2cc8996c2a8d2b114e4bc8a0670" Jan 29 12:34:17 crc kubenswrapper[4753]: I0129 12:34:17.853524 4753 scope.go:117] "RemoveContainer" containerID="a5249ab8050d2462f51d1dbfd594b60152e053e89d8c52cfbd2ef0c85b2a7d3b" Jan 29 12:34:17 crc kubenswrapper[4753]: I0129 12:34:17.869542 4753 scope.go:117] "RemoveContainer" containerID="e217bdffa73d03c29ee18c168cdec6a8b4e9c536de16348101bdeba622564205" Jan 29 12:34:17 crc kubenswrapper[4753]: I0129 12:34:17.898178 4753 scope.go:117] "RemoveContainer" containerID="a41389a7fe0155c69f23818035276e36fa96b77ccb336676bb2c0834dacf2966" Jan 29 12:34:17 
crc kubenswrapper[4753]: I0129 12:34:17.923748 4753 scope.go:117] "RemoveContainer" containerID="c6e88c5f166e867ae6aadcf20c840040fbb1dc60adc0d0e24432e34480530106" Jan 29 12:34:17 crc kubenswrapper[4753]: I0129 12:34:17.962926 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/swift-storage-0"] Jan 29 12:34:17 crc kubenswrapper[4753]: I0129 12:34:17.968384 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/swift-storage-0"] Jan 29 12:34:19 crc kubenswrapper[4753]: I0129 12:34:19.903818 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" path="/var/lib/kubelet/pods/440fbaaa-a825-4b7c-b5f5-6cad02f05493/volumes" Jan 29 12:34:27 crc kubenswrapper[4753]: I0129 12:34:27.892776 4753 scope.go:117] "RemoveContainer" containerID="00211d2562b6632acb04decda9045cbb8d3216adca69cd5bc1380ec42b8211e0" Jan 29 12:34:27 crc kubenswrapper[4753]: E0129 12:34:27.893549 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7c24x_openshift-machine-config-operator(b0310995-a7c7-47c3-ae6c-05daaaba92a6)\"" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.012183 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/barbican-db-sync-gq7gm"] Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.018435 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/barbican-db-sync-gq7gm"] Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.061217 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/barbican-keystone-listener-55fbcf8568-mtgrc"] Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.061824 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/barbican-keystone-listener-55fbcf8568-mtgrc" podUID="3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852" containerName="barbican-keystone-listener-log" containerID="cri-o://6d0de795d293a9496cc57597275c0e8e4aac8929167336a966effd96d47af5b8" gracePeriod=30 Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.062336 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/barbican-keystone-listener-55fbcf8568-mtgrc" podUID="3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852" containerName="barbican-keystone-listener" containerID="cri-o://1991d457dadef2edd40ec30edf373e24eb3017e81ed995747af5afd3c5855f98" gracePeriod=30 Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.092117 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/barbican-worker-d6d6cc49c-6q5wj"] Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.092380 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/barbican-worker-d6d6cc49c-6q5wj" podUID="49d7637b-85ec-47c7-bf42-a55aa5a8b8dd" containerName="barbican-worker-log" containerID="cri-o://fc8fc10b9da2aaaf37221ef3ec40f59ab9e07800d4b88fe042c4833f0e2810c5" gracePeriod=30 Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.092688 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/barbican-worker-d6d6cc49c-6q5wj" podUID="49d7637b-85ec-47c7-bf42-a55aa5a8b8dd" containerName="barbican-worker" 
containerID="cri-o://c62784be0ecc6f613d61e3d10582c6f028f1c1dea1033542d99572833441d2cd" gracePeriod=30 Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.130750 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/barbican-api-6b47c5b866-9jg8k"] Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.131019 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/barbican-api-6b47c5b866-9jg8k" podUID="ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1" containerName="barbican-api-log" containerID="cri-o://62c7539c2fe6cfc8d7dc37561f9c1101de0457fb66f73fdf92cfc51c43d3c6e2" gracePeriod=30 Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.131520 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/barbican-api-6b47c5b866-9jg8k" podUID="ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1" containerName="barbican-api" containerID="cri-o://f5db0ea18c845c065ed6db5307b995c2d28fb39b39c926836dba6ea02f233fcc" gracePeriod=30 Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.142862 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/barbicanc51a-account-delete-zxwrl"] Jan 29 12:34:30 crc kubenswrapper[4753]: E0129 12:34:30.143406 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="account-reaper" Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.143459 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="account-reaper" Jan 29 12:34:30 crc kubenswrapper[4753]: E0129 12:34:30.143482 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="object-server" Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.143490 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="object-server" Jan 29 12:34:30 crc kubenswrapper[4753]: E0129 12:34:30.143499 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="03e6ef3e-14b4-4cb6-8eff-514f4b464ed1" containerName="extract-utilities" Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.143508 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="03e6ef3e-14b4-4cb6-8eff-514f4b464ed1" containerName="extract-utilities" Jan 29 12:34:30 crc kubenswrapper[4753]: E0129 12:34:30.143521 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="account-auditor" Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.143528 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="account-auditor" Jan 29 12:34:30 crc kubenswrapper[4753]: E0129 12:34:30.143542 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="701dafed-ce7f-4a89-982d-f243757d097f" containerName="proxy-server" Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.143550 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="701dafed-ce7f-4a89-982d-f243757d097f" containerName="proxy-server" Jan 29 12:34:30 crc kubenswrapper[4753]: E0129 12:34:30.143563 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="03e6ef3e-14b4-4cb6-8eff-514f4b464ed1" containerName="registry-server" Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.143570 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="03e6ef3e-14b4-4cb6-8eff-514f4b464ed1" 
containerName="registry-server" Jan 29 12:34:30 crc kubenswrapper[4753]: E0129 12:34:30.143581 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aadb20d7-4116-47ca-9c1e-41d54eb80a6a" containerName="extract-content" Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.143588 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="aadb20d7-4116-47ca-9c1e-41d54eb80a6a" containerName="extract-content" Jan 29 12:34:30 crc kubenswrapper[4753]: E0129 12:34:30.143602 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="container-updater" Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.143610 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="container-updater" Jan 29 12:34:30 crc kubenswrapper[4753]: E0129 12:34:30.143620 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="701dafed-ce7f-4a89-982d-f243757d097f" containerName="proxy-httpd" Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.143629 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="701dafed-ce7f-4a89-982d-f243757d097f" containerName="proxy-httpd" Jan 29 12:34:30 crc kubenswrapper[4753]: E0129 12:34:30.143642 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="object-expirer" Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.143648 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="object-expirer" Jan 29 12:34:30 crc kubenswrapper[4753]: E0129 12:34:30.143661 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="account-server" Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.143668 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="account-server" Jan 29 12:34:30 crc kubenswrapper[4753]: E0129 12:34:30.143678 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="account-replicator" Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.143685 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="account-replicator" Jan 29 12:34:30 crc kubenswrapper[4753]: E0129 12:34:30.143698 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="rsync" Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.143705 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="rsync" Jan 29 12:34:30 crc kubenswrapper[4753]: E0129 12:34:30.143715 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="object-auditor" Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.143722 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="object-auditor" Jan 29 12:34:30 crc kubenswrapper[4753]: E0129 12:34:30.143730 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="swift-recon-cron" Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.143737 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" 
containerName="swift-recon-cron" Jan 29 12:34:30 crc kubenswrapper[4753]: E0129 12:34:30.143750 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aadb20d7-4116-47ca-9c1e-41d54eb80a6a" containerName="registry-server" Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.143757 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="aadb20d7-4116-47ca-9c1e-41d54eb80a6a" containerName="registry-server" Jan 29 12:34:30 crc kubenswrapper[4753]: E0129 12:34:30.143779 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aadb20d7-4116-47ca-9c1e-41d54eb80a6a" containerName="extract-utilities" Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.143788 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="aadb20d7-4116-47ca-9c1e-41d54eb80a6a" containerName="extract-utilities" Jan 29 12:34:30 crc kubenswrapper[4753]: E0129 12:34:30.143805 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="container-server" Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.143814 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="container-server" Jan 29 12:34:30 crc kubenswrapper[4753]: E0129 12:34:30.143827 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="object-updater" Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.143833 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="object-updater" Jan 29 12:34:30 crc kubenswrapper[4753]: E0129 12:34:30.143841 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="object-replicator" Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.143849 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="object-replicator" Jan 29 12:34:30 crc kubenswrapper[4753]: E0129 12:34:30.143860 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="container-replicator" Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.143866 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="container-replicator" Jan 29 12:34:30 crc kubenswrapper[4753]: E0129 12:34:30.143878 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="container-auditor" Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.143885 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="container-auditor" Jan 29 12:34:30 crc kubenswrapper[4753]: E0129 12:34:30.143897 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="03e6ef3e-14b4-4cb6-8eff-514f4b464ed1" containerName="extract-content" Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.143904 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="03e6ef3e-14b4-4cb6-8eff-514f4b464ed1" containerName="extract-content" Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.144161 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="701dafed-ce7f-4a89-982d-f243757d097f" containerName="proxy-httpd" Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.144187 4753 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="701dafed-ce7f-4a89-982d-f243757d097f" containerName="proxy-server" Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.144194 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="object-updater" Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.144204 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="account-server" Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.144214 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="03e6ef3e-14b4-4cb6-8eff-514f4b464ed1" containerName="registry-server" Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.144241 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="object-replicator" Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.144253 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="swift-recon-cron" Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.144263 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="container-auditor" Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.144276 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="container-updater" Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.144287 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="object-server" Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.144294 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="account-auditor" Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.144304 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="rsync" Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.144315 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="container-server" Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.144326 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="account-replicator" Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.144337 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="account-reaper" Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.144350 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="object-auditor" Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.144363 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="aadb20d7-4116-47ca-9c1e-41d54eb80a6a" containerName="registry-server" Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.144371 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="object-expirer" Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.144384 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="440fbaaa-a825-4b7c-b5f5-6cad02f05493" containerName="container-replicator" Jan 29 12:34:30 crc 
kubenswrapper[4753]: I0129 12:34:30.145172 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/barbicanc51a-account-delete-zxwrl" Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.186164 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/barbicanc51a-account-delete-zxwrl"] Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.487354 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v5ctn\" (UniqueName: \"kubernetes.io/projected/9841bda3-b354-449f-ac31-65752f2735b8-kube-api-access-v5ctn\") pod \"barbicanc51a-account-delete-zxwrl\" (UID: \"9841bda3-b354-449f-ac31-65752f2735b8\") " pod="swift-kuttl-tests/barbicanc51a-account-delete-zxwrl" Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.487849 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9841bda3-b354-449f-ac31-65752f2735b8-operator-scripts\") pod \"barbicanc51a-account-delete-zxwrl\" (UID: \"9841bda3-b354-449f-ac31-65752f2735b8\") " pod="swift-kuttl-tests/barbicanc51a-account-delete-zxwrl" Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.589658 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v5ctn\" (UniqueName: \"kubernetes.io/projected/9841bda3-b354-449f-ac31-65752f2735b8-kube-api-access-v5ctn\") pod \"barbicanc51a-account-delete-zxwrl\" (UID: \"9841bda3-b354-449f-ac31-65752f2735b8\") " pod="swift-kuttl-tests/barbicanc51a-account-delete-zxwrl" Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.589789 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9841bda3-b354-449f-ac31-65752f2735b8-operator-scripts\") pod \"barbicanc51a-account-delete-zxwrl\" (UID: \"9841bda3-b354-449f-ac31-65752f2735b8\") " pod="swift-kuttl-tests/barbicanc51a-account-delete-zxwrl" Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.590723 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9841bda3-b354-449f-ac31-65752f2735b8-operator-scripts\") pod \"barbicanc51a-account-delete-zxwrl\" (UID: \"9841bda3-b354-449f-ac31-65752f2735b8\") " pod="swift-kuttl-tests/barbicanc51a-account-delete-zxwrl" Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.611093 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v5ctn\" (UniqueName: \"kubernetes.io/projected/9841bda3-b354-449f-ac31-65752f2735b8-kube-api-access-v5ctn\") pod \"barbicanc51a-account-delete-zxwrl\" (UID: \"9841bda3-b354-449f-ac31-65752f2735b8\") " pod="swift-kuttl-tests/barbicanc51a-account-delete-zxwrl" Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.797785 4753 scope.go:117] "RemoveContainer" containerID="b3818d112e3c7af7b22d4dc943951ccfb82acd520a7a99de288b20ecf4303c44" Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.827069 4753 generic.go:334] "Generic (PLEG): container finished" podID="3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852" containerID="6d0de795d293a9496cc57597275c0e8e4aac8929167336a966effd96d47af5b8" exitCode=143 Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.827170 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/barbican-keystone-listener-55fbcf8568-mtgrc" 
event={"ID":"3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852","Type":"ContainerDied","Data":"6d0de795d293a9496cc57597275c0e8e4aac8929167336a966effd96d47af5b8"} Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.829712 4753 generic.go:334] "Generic (PLEG): container finished" podID="ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1" containerID="62c7539c2fe6cfc8d7dc37561f9c1101de0457fb66f73fdf92cfc51c43d3c6e2" exitCode=143 Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.829786 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/barbican-api-6b47c5b866-9jg8k" event={"ID":"ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1","Type":"ContainerDied","Data":"62c7539c2fe6cfc8d7dc37561f9c1101de0457fb66f73fdf92cfc51c43d3c6e2"} Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.831758 4753 generic.go:334] "Generic (PLEG): container finished" podID="49d7637b-85ec-47c7-bf42-a55aa5a8b8dd" containerID="fc8fc10b9da2aaaf37221ef3ec40f59ab9e07800d4b88fe042c4833f0e2810c5" exitCode=143 Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.831793 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/barbican-worker-d6d6cc49c-6q5wj" event={"ID":"49d7637b-85ec-47c7-bf42-a55aa5a8b8dd","Type":"ContainerDied","Data":"fc8fc10b9da2aaaf37221ef3ec40f59ab9e07800d4b88fe042c4833f0e2810c5"} Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.836967 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/barbicanc51a-account-delete-zxwrl" Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.856731 4753 scope.go:117] "RemoveContainer" containerID="f7f5aab11c2304c788f61add30450086c7482019316e81864a6f9044c0d8019e" Jan 29 12:34:30 crc kubenswrapper[4753]: I0129 12:34:30.882708 4753 scope.go:117] "RemoveContainer" containerID="2bd47ee4d4bdbbab245ac4269049ddfdcc62ea915a2cd8f3521b019a267507cc" Jan 29 12:34:31 crc kubenswrapper[4753]: I0129 12:34:31.413421 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/barbicanc51a-account-delete-zxwrl"] Jan 29 12:34:31 crc kubenswrapper[4753]: I0129 12:34:31.842009 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/barbicanc51a-account-delete-zxwrl" event={"ID":"9841bda3-b354-449f-ac31-65752f2735b8","Type":"ContainerStarted","Data":"6cd90826cbfa1c9275a18621bfa293a9827512708c25b5ef58bd73d8e2ed3f1d"} Jan 29 12:34:31 crc kubenswrapper[4753]: I0129 12:34:31.897705 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e51f25a1-62d9-4b9b-8705-82187d0bd515" path="/var/lib/kubelet/pods/e51f25a1-62d9-4b9b-8705-82187d0bd515/volumes" Jan 29 12:34:32 crc kubenswrapper[4753]: I0129 12:34:32.852803 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/barbicanc51a-account-delete-zxwrl" event={"ID":"9841bda3-b354-449f-ac31-65752f2735b8","Type":"ContainerStarted","Data":"c71356fa86b470c8f7e0cf6bcc6682c8b13a810d9eb2248d5468458188761f33"} Jan 29 12:34:32 crc kubenswrapper[4753]: I0129 12:34:32.856492 4753 generic.go:334] "Generic (PLEG): container finished" podID="3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852" containerID="1991d457dadef2edd40ec30edf373e24eb3017e81ed995747af5afd3c5855f98" exitCode=0 Jan 29 12:34:32 crc kubenswrapper[4753]: I0129 12:34:32.856570 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/barbican-keystone-listener-55fbcf8568-mtgrc" event={"ID":"3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852","Type":"ContainerDied","Data":"1991d457dadef2edd40ec30edf373e24eb3017e81ed995747af5afd3c5855f98"} Jan 29 12:34:32 
crc kubenswrapper[4753]: I0129 12:34:32.868715 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="swift-kuttl-tests/barbicanc51a-account-delete-zxwrl" podStartSLOduration=2.8686692000000003 podStartE2EDuration="2.8686692s" podCreationTimestamp="2026-01-29 12:34:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:34:32.868649699 +0000 UTC m=+1687.120731154" watchObservedRunningTime="2026-01-29 12:34:32.8686692 +0000 UTC m=+1687.120750655" Jan 29 12:34:32 crc kubenswrapper[4753]: I0129 12:34:32.871518 4753 generic.go:334] "Generic (PLEG): container finished" podID="49d7637b-85ec-47c7-bf42-a55aa5a8b8dd" containerID="c62784be0ecc6f613d61e3d10582c6f028f1c1dea1033542d99572833441d2cd" exitCode=0 Jan 29 12:34:32 crc kubenswrapper[4753]: I0129 12:34:32.871595 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/barbican-worker-d6d6cc49c-6q5wj" event={"ID":"49d7637b-85ec-47c7-bf42-a55aa5a8b8dd","Type":"ContainerDied","Data":"c62784be0ecc6f613d61e3d10582c6f028f1c1dea1033542d99572833441d2cd"} Jan 29 12:34:32 crc kubenswrapper[4753]: I0129 12:34:32.984489 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/barbican-keystone-listener-55fbcf8568-mtgrc" Jan 29 12:34:32 crc kubenswrapper[4753]: I0129 12:34:32.994358 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/barbican-worker-d6d6cc49c-6q5wj" Jan 29 12:34:33 crc kubenswrapper[4753]: I0129 12:34:33.106574 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852-config-data-custom\") pod \"3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852\" (UID: \"3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852\") " Jan 29 12:34:33 crc kubenswrapper[4753]: I0129 12:34:33.106651 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852-logs\") pod \"3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852\" (UID: \"3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852\") " Jan 29 12:34:33 crc kubenswrapper[4753]: I0129 12:34:33.106676 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/49d7637b-85ec-47c7-bf42-a55aa5a8b8dd-logs\") pod \"49d7637b-85ec-47c7-bf42-a55aa5a8b8dd\" (UID: \"49d7637b-85ec-47c7-bf42-a55aa5a8b8dd\") " Jan 29 12:34:33 crc kubenswrapper[4753]: I0129 12:34:33.106751 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9l6b4\" (UniqueName: \"kubernetes.io/projected/3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852-kube-api-access-9l6b4\") pod \"3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852\" (UID: \"3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852\") " Jan 29 12:34:33 crc kubenswrapper[4753]: I0129 12:34:33.106795 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8dvtp\" (UniqueName: \"kubernetes.io/projected/49d7637b-85ec-47c7-bf42-a55aa5a8b8dd-kube-api-access-8dvtp\") pod \"49d7637b-85ec-47c7-bf42-a55aa5a8b8dd\" (UID: \"49d7637b-85ec-47c7-bf42-a55aa5a8b8dd\") " Jan 29 12:34:33 crc kubenswrapper[4753]: I0129 12:34:33.106850 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/49d7637b-85ec-47c7-bf42-a55aa5a8b8dd-config-data\") pod \"49d7637b-85ec-47c7-bf42-a55aa5a8b8dd\" (UID: \"49d7637b-85ec-47c7-bf42-a55aa5a8b8dd\") " Jan 29 12:34:33 crc kubenswrapper[4753]: I0129 12:34:33.106959 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852-config-data\") pod \"3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852\" (UID: \"3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852\") " Jan 29 12:34:33 crc kubenswrapper[4753]: I0129 12:34:33.107022 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/49d7637b-85ec-47c7-bf42-a55aa5a8b8dd-config-data-custom\") pod \"49d7637b-85ec-47c7-bf42-a55aa5a8b8dd\" (UID: \"49d7637b-85ec-47c7-bf42-a55aa5a8b8dd\") " Jan 29 12:34:33 crc kubenswrapper[4753]: I0129 12:34:33.107219 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852-logs" (OuterVolumeSpecName: "logs") pod "3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852" (UID: "3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:34:33 crc kubenswrapper[4753]: I0129 12:34:33.107356 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/49d7637b-85ec-47c7-bf42-a55aa5a8b8dd-logs" (OuterVolumeSpecName: "logs") pod "49d7637b-85ec-47c7-bf42-a55aa5a8b8dd" (UID: "49d7637b-85ec-47c7-bf42-a55aa5a8b8dd"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:34:33 crc kubenswrapper[4753]: I0129 12:34:33.122808 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49d7637b-85ec-47c7-bf42-a55aa5a8b8dd-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "49d7637b-85ec-47c7-bf42-a55aa5a8b8dd" (UID: "49d7637b-85ec-47c7-bf42-a55aa5a8b8dd"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:34:33 crc kubenswrapper[4753]: I0129 12:34:33.123112 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852-kube-api-access-9l6b4" (OuterVolumeSpecName: "kube-api-access-9l6b4") pod "3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852" (UID: "3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852"). InnerVolumeSpecName "kube-api-access-9l6b4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:34:33 crc kubenswrapper[4753]: I0129 12:34:33.124702 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49d7637b-85ec-47c7-bf42-a55aa5a8b8dd-kube-api-access-8dvtp" (OuterVolumeSpecName: "kube-api-access-8dvtp") pod "49d7637b-85ec-47c7-bf42-a55aa5a8b8dd" (UID: "49d7637b-85ec-47c7-bf42-a55aa5a8b8dd"). InnerVolumeSpecName "kube-api-access-8dvtp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:34:33 crc kubenswrapper[4753]: I0129 12:34:33.130344 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852" (UID: "3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:34:33 crc kubenswrapper[4753]: I0129 12:34:33.148589 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852-config-data" (OuterVolumeSpecName: "config-data") pod "3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852" (UID: "3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:34:33 crc kubenswrapper[4753]: I0129 12:34:33.159760 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49d7637b-85ec-47c7-bf42-a55aa5a8b8dd-config-data" (OuterVolumeSpecName: "config-data") pod "49d7637b-85ec-47c7-bf42-a55aa5a8b8dd" (UID: "49d7637b-85ec-47c7-bf42-a55aa5a8b8dd"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:34:33 crc kubenswrapper[4753]: I0129 12:34:33.208473 4753 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:33 crc kubenswrapper[4753]: I0129 12:34:33.208518 4753 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/49d7637b-85ec-47c7-bf42-a55aa5a8b8dd-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:33 crc kubenswrapper[4753]: I0129 12:34:33.208530 4753 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:33 crc kubenswrapper[4753]: I0129 12:34:33.208539 4753 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852-logs\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:33 crc kubenswrapper[4753]: I0129 12:34:33.208548 4753 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/49d7637b-85ec-47c7-bf42-a55aa5a8b8dd-logs\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:33 crc kubenswrapper[4753]: I0129 12:34:33.208557 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9l6b4\" (UniqueName: \"kubernetes.io/projected/3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852-kube-api-access-9l6b4\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:33 crc kubenswrapper[4753]: I0129 12:34:33.208565 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8dvtp\" (UniqueName: \"kubernetes.io/projected/49d7637b-85ec-47c7-bf42-a55aa5a8b8dd-kube-api-access-8dvtp\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:33 crc kubenswrapper[4753]: I0129 12:34:33.208573 4753 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49d7637b-85ec-47c7-bf42-a55aa5a8b8dd-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:33 crc kubenswrapper[4753]: I0129 12:34:33.888213 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/barbican-keystone-listener-55fbcf8568-mtgrc" Jan 29 12:34:33 crc kubenswrapper[4753]: I0129 12:34:33.891481 4753 generic.go:334] "Generic (PLEG): container finished" podID="ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1" containerID="f5db0ea18c845c065ed6db5307b995c2d28fb39b39c926836dba6ea02f233fcc" exitCode=0 Jan 29 12:34:33 crc kubenswrapper[4753]: I0129 12:34:33.894613 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/barbican-worker-d6d6cc49c-6q5wj" Jan 29 12:34:33 crc kubenswrapper[4753]: I0129 12:34:33.898061 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/barbican-keystone-listener-55fbcf8568-mtgrc" event={"ID":"3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852","Type":"ContainerDied","Data":"4249fde024f27500b7c32ec094d162b44af9829f8398d9e7dd23e1a540472c6c"} Jan 29 12:34:33 crc kubenswrapper[4753]: I0129 12:34:33.898111 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/barbican-api-6b47c5b866-9jg8k" event={"ID":"ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1","Type":"ContainerDied","Data":"f5db0ea18c845c065ed6db5307b995c2d28fb39b39c926836dba6ea02f233fcc"} Jan 29 12:34:33 crc kubenswrapper[4753]: I0129 12:34:33.898132 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/barbican-worker-d6d6cc49c-6q5wj" event={"ID":"49d7637b-85ec-47c7-bf42-a55aa5a8b8dd","Type":"ContainerDied","Data":"04e7228e9d6f786fa8e128a6d95d50d9703b866084b686b4f155547cc8d1557e"} Jan 29 12:34:33 crc kubenswrapper[4753]: I0129 12:34:33.898157 4753 scope.go:117] "RemoveContainer" containerID="1991d457dadef2edd40ec30edf373e24eb3017e81ed995747af5afd3c5855f98" Jan 29 12:34:33 crc kubenswrapper[4753]: I0129 12:34:33.939841 4753 scope.go:117] "RemoveContainer" containerID="6d0de795d293a9496cc57597275c0e8e4aac8929167336a966effd96d47af5b8" Jan 29 12:34:33 crc kubenswrapper[4753]: I0129 12:34:33.950001 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/barbican-keystone-listener-55fbcf8568-mtgrc"] Jan 29 12:34:33 crc kubenswrapper[4753]: I0129 12:34:33.958453 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/barbican-keystone-listener-55fbcf8568-mtgrc"] Jan 29 12:34:33 crc kubenswrapper[4753]: I0129 12:34:33.970032 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/barbican-worker-d6d6cc49c-6q5wj"] Jan 29 12:34:33 crc kubenswrapper[4753]: I0129 12:34:33.973688 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/barbican-worker-d6d6cc49c-6q5wj"] Jan 29 12:34:34 crc kubenswrapper[4753]: I0129 12:34:34.009681 4753 scope.go:117] "RemoveContainer" containerID="c62784be0ecc6f613d61e3d10582c6f028f1c1dea1033542d99572833441d2cd" Jan 29 12:34:34 crc kubenswrapper[4753]: I0129 12:34:34.048329 4753 scope.go:117] "RemoveContainer" containerID="fc8fc10b9da2aaaf37221ef3ec40f59ab9e07800d4b88fe042c4833f0e2810c5" Jan 29 12:34:34 crc kubenswrapper[4753]: I0129 12:34:34.179071 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/barbican-api-6b47c5b866-9jg8k" Jan 29 12:34:34 crc kubenswrapper[4753]: I0129 12:34:34.392092 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1-config-data\") pod \"ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1\" (UID: \"ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1\") " Jan 29 12:34:34 crc kubenswrapper[4753]: I0129 12:34:34.392237 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1-logs\") pod \"ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1\" (UID: \"ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1\") " Jan 29 12:34:34 crc kubenswrapper[4753]: I0129 12:34:34.392268 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1-config-data-custom\") pod \"ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1\" (UID: \"ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1\") " Jan 29 12:34:34 crc kubenswrapper[4753]: I0129 12:34:34.392288 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7994k\" (UniqueName: \"kubernetes.io/projected/ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1-kube-api-access-7994k\") pod \"ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1\" (UID: \"ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1\") " Jan 29 12:34:34 crc kubenswrapper[4753]: I0129 12:34:34.394037 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1-logs" (OuterVolumeSpecName: "logs") pod "ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1" (UID: "ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:34:34 crc kubenswrapper[4753]: I0129 12:34:34.406697 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1-kube-api-access-7994k" (OuterVolumeSpecName: "kube-api-access-7994k") pod "ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1" (UID: "ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1"). InnerVolumeSpecName "kube-api-access-7994k". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:34:34 crc kubenswrapper[4753]: I0129 12:34:34.415565 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1" (UID: "ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:34:34 crc kubenswrapper[4753]: I0129 12:34:34.452217 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1-config-data" (OuterVolumeSpecName: "config-data") pod "ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1" (UID: "ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:34:34 crc kubenswrapper[4753]: I0129 12:34:34.494493 4753 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:34 crc kubenswrapper[4753]: I0129 12:34:34.494572 4753 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1-logs\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:34 crc kubenswrapper[4753]: I0129 12:34:34.494590 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7994k\" (UniqueName: \"kubernetes.io/projected/ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1-kube-api-access-7994k\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:34 crc kubenswrapper[4753]: I0129 12:34:34.494633 4753 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:34 crc kubenswrapper[4753]: I0129 12:34:34.907051 4753 generic.go:334] "Generic (PLEG): container finished" podID="9841bda3-b354-449f-ac31-65752f2735b8" containerID="c71356fa86b470c8f7e0cf6bcc6682c8b13a810d9eb2248d5468458188761f33" exitCode=0 Jan 29 12:34:34 crc kubenswrapper[4753]: I0129 12:34:34.907196 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/barbicanc51a-account-delete-zxwrl" event={"ID":"9841bda3-b354-449f-ac31-65752f2735b8","Type":"ContainerDied","Data":"c71356fa86b470c8f7e0cf6bcc6682c8b13a810d9eb2248d5468458188761f33"} Jan 29 12:34:34 crc kubenswrapper[4753]: I0129 12:34:34.911675 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/barbican-api-6b47c5b866-9jg8k" Jan 29 12:34:34 crc kubenswrapper[4753]: I0129 12:34:34.911686 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/barbican-api-6b47c5b866-9jg8k" event={"ID":"ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1","Type":"ContainerDied","Data":"9da35642687773f59fcbf13d7a9bc82da7ccb97e310ac00f086c7aa70b652465"} Jan 29 12:34:34 crc kubenswrapper[4753]: I0129 12:34:34.912279 4753 scope.go:117] "RemoveContainer" containerID="f5db0ea18c845c065ed6db5307b995c2d28fb39b39c926836dba6ea02f233fcc" Jan 29 12:34:35 crc kubenswrapper[4753]: I0129 12:34:35.126527 4753 scope.go:117] "RemoveContainer" containerID="62c7539c2fe6cfc8d7dc37561f9c1101de0457fb66f73fdf92cfc51c43d3c6e2" Jan 29 12:34:35 crc kubenswrapper[4753]: I0129 12:34:35.145283 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/barbican-api-6b47c5b866-9jg8k"] Jan 29 12:34:35 crc kubenswrapper[4753]: I0129 12:34:35.159651 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/barbican-api-6b47c5b866-9jg8k"] Jan 29 12:34:35 crc kubenswrapper[4753]: I0129 12:34:35.215027 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/keystone-db-sync-8ljnq"] Jan 29 12:34:35 crc kubenswrapper[4753]: I0129 12:34:35.244888 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/keystone-bootstrap-fgpnh"] Jan 29 12:34:35 crc kubenswrapper[4753]: I0129 12:34:35.251445 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/keystone-db-sync-8ljnq"] Jan 29 12:34:35 crc kubenswrapper[4753]: I0129 12:34:35.258796 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/keystonedfb1-account-delete-t7d55"] Jan 29 12:34:35 crc kubenswrapper[4753]: E0129 12:34:35.259190 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1" containerName="barbican-api" Jan 29 12:34:35 crc kubenswrapper[4753]: I0129 12:34:35.259213 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1" containerName="barbican-api" Jan 29 12:34:35 crc kubenswrapper[4753]: E0129 12:34:35.259250 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852" containerName="barbican-keystone-listener-log" Jan 29 12:34:35 crc kubenswrapper[4753]: I0129 12:34:35.259261 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852" containerName="barbican-keystone-listener-log" Jan 29 12:34:35 crc kubenswrapper[4753]: E0129 12:34:35.259279 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49d7637b-85ec-47c7-bf42-a55aa5a8b8dd" containerName="barbican-worker-log" Jan 29 12:34:35 crc kubenswrapper[4753]: I0129 12:34:35.259288 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="49d7637b-85ec-47c7-bf42-a55aa5a8b8dd" containerName="barbican-worker-log" Jan 29 12:34:35 crc kubenswrapper[4753]: E0129 12:34:35.259301 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1" containerName="barbican-api-log" Jan 29 12:34:35 crc kubenswrapper[4753]: I0129 12:34:35.259309 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1" containerName="barbican-api-log" Jan 29 12:34:35 crc kubenswrapper[4753]: E0129 12:34:35.259330 4753 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852" containerName="barbican-keystone-listener" Jan 29 12:34:35 crc kubenswrapper[4753]: I0129 12:34:35.259338 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852" containerName="barbican-keystone-listener" Jan 29 12:34:35 crc kubenswrapper[4753]: E0129 12:34:35.259351 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49d7637b-85ec-47c7-bf42-a55aa5a8b8dd" containerName="barbican-worker" Jan 29 12:34:35 crc kubenswrapper[4753]: I0129 12:34:35.259360 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="49d7637b-85ec-47c7-bf42-a55aa5a8b8dd" containerName="barbican-worker" Jan 29 12:34:35 crc kubenswrapper[4753]: I0129 12:34:35.259564 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852" containerName="barbican-keystone-listener-log" Jan 29 12:34:35 crc kubenswrapper[4753]: I0129 12:34:35.259583 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1" containerName="barbican-api" Jan 29 12:34:35 crc kubenswrapper[4753]: I0129 12:34:35.259595 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1" containerName="barbican-api-log" Jan 29 12:34:35 crc kubenswrapper[4753]: I0129 12:34:35.259609 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852" containerName="barbican-keystone-listener" Jan 29 12:34:35 crc kubenswrapper[4753]: I0129 12:34:35.259619 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="49d7637b-85ec-47c7-bf42-a55aa5a8b8dd" containerName="barbican-worker" Jan 29 12:34:35 crc kubenswrapper[4753]: I0129 12:34:35.259630 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="49d7637b-85ec-47c7-bf42-a55aa5a8b8dd" containerName="barbican-worker-log" Jan 29 12:34:35 crc kubenswrapper[4753]: I0129 12:34:35.260365 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/keystonedfb1-account-delete-t7d55" Jan 29 12:34:35 crc kubenswrapper[4753]: I0129 12:34:35.267925 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/keystone-bootstrap-fgpnh"] Jan 29 12:34:35 crc kubenswrapper[4753]: I0129 12:34:35.282515 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/keystone-66595c964d-6hnff"] Jan 29 12:34:35 crc kubenswrapper[4753]: I0129 12:34:35.282766 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/keystone-66595c964d-6hnff" podUID="7832353e-7e00-44af-bd6a-9227ef58fd05" containerName="keystone-api" containerID="cri-o://56e14626896d6f1f29b27a6eed7fc588033ff40f6c37175ca5a67638ead7c7b0" gracePeriod=30 Jan 29 12:34:35 crc kubenswrapper[4753]: I0129 12:34:35.292952 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/keystonedfb1-account-delete-t7d55"] Jan 29 12:34:35 crc kubenswrapper[4753]: I0129 12:34:35.433443 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/74538c68-72a6-44a4-a906-3cf0a864a392-operator-scripts\") pod \"keystonedfb1-account-delete-t7d55\" (UID: \"74538c68-72a6-44a4-a906-3cf0a864a392\") " pod="swift-kuttl-tests/keystonedfb1-account-delete-t7d55" Jan 29 12:34:35 crc kubenswrapper[4753]: I0129 12:34:35.433499 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tfd72\" (UniqueName: \"kubernetes.io/projected/74538c68-72a6-44a4-a906-3cf0a864a392-kube-api-access-tfd72\") pod \"keystonedfb1-account-delete-t7d55\" (UID: \"74538c68-72a6-44a4-a906-3cf0a864a392\") " pod="swift-kuttl-tests/keystonedfb1-account-delete-t7d55" Jan 29 12:34:35 crc kubenswrapper[4753]: I0129 12:34:35.534893 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tfd72\" (UniqueName: \"kubernetes.io/projected/74538c68-72a6-44a4-a906-3cf0a864a392-kube-api-access-tfd72\") pod \"keystonedfb1-account-delete-t7d55\" (UID: \"74538c68-72a6-44a4-a906-3cf0a864a392\") " pod="swift-kuttl-tests/keystonedfb1-account-delete-t7d55" Jan 29 12:34:35 crc kubenswrapper[4753]: I0129 12:34:35.535920 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/74538c68-72a6-44a4-a906-3cf0a864a392-operator-scripts\") pod \"keystonedfb1-account-delete-t7d55\" (UID: \"74538c68-72a6-44a4-a906-3cf0a864a392\") " pod="swift-kuttl-tests/keystonedfb1-account-delete-t7d55" Jan 29 12:34:35 crc kubenswrapper[4753]: I0129 12:34:35.536821 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/74538c68-72a6-44a4-a906-3cf0a864a392-operator-scripts\") pod \"keystonedfb1-account-delete-t7d55\" (UID: \"74538c68-72a6-44a4-a906-3cf0a864a392\") " pod="swift-kuttl-tests/keystonedfb1-account-delete-t7d55" Jan 29 12:34:35 crc kubenswrapper[4753]: I0129 12:34:35.554142 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tfd72\" (UniqueName: \"kubernetes.io/projected/74538c68-72a6-44a4-a906-3cf0a864a392-kube-api-access-tfd72\") pod \"keystonedfb1-account-delete-t7d55\" (UID: \"74538c68-72a6-44a4-a906-3cf0a864a392\") " pod="swift-kuttl-tests/keystonedfb1-account-delete-t7d55" Jan 29 12:34:35 crc kubenswrapper[4753]: I0129 12:34:35.581738 4753 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/keystonedfb1-account-delete-t7d55" Jan 29 12:34:35 crc kubenswrapper[4753]: I0129 12:34:35.792334 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/keystonedfb1-account-delete-t7d55"] Jan 29 12:34:35 crc kubenswrapper[4753]: I0129 12:34:35.898443 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852" path="/var/lib/kubelet/pods/3f4a7f8b-0fa0-48f7-a3c1-8dbaf4a4f852/volumes" Jan 29 12:34:35 crc kubenswrapper[4753]: I0129 12:34:35.899529 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="404ab5c7-422b-4264-baf7-64ab2857eb4b" path="/var/lib/kubelet/pods/404ab5c7-422b-4264-baf7-64ab2857eb4b/volumes" Jan 29 12:34:35 crc kubenswrapper[4753]: I0129 12:34:35.900066 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49d7637b-85ec-47c7-bf42-a55aa5a8b8dd" path="/var/lib/kubelet/pods/49d7637b-85ec-47c7-bf42-a55aa5a8b8dd/volumes" Jan 29 12:34:35 crc kubenswrapper[4753]: I0129 12:34:35.901625 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="983c251f-33e5-42b4-a00e-5fc29467892d" path="/var/lib/kubelet/pods/983c251f-33e5-42b4-a00e-5fc29467892d/volumes" Jan 29 12:34:35 crc kubenswrapper[4753]: I0129 12:34:35.902301 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1" path="/var/lib/kubelet/pods/ee7aaa1d-e1b4-4ec4-8ef6-2bffb1fa71e1/volumes" Jan 29 12:34:35 crc kubenswrapper[4753]: I0129 12:34:35.928779 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/keystonedfb1-account-delete-t7d55" event={"ID":"74538c68-72a6-44a4-a906-3cf0a864a392","Type":"ContainerStarted","Data":"b1b3638c69b4bd5a3d916cc5dc4822fc6155c8882f6b4b057bb06541087b3cae"} Jan 29 12:34:37 crc kubenswrapper[4753]: I0129 12:34:37.059182 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/keystonedfb1-account-delete-t7d55" event={"ID":"74538c68-72a6-44a4-a906-3cf0a864a392","Type":"ContainerStarted","Data":"f01bf5128c8e96d8ceb312c4f1b44ad410f48f5c54623eead170d31ce0311c5a"} Jan 29 12:34:37 crc kubenswrapper[4753]: I0129 12:34:37.073530 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/barbicanc51a-account-delete-zxwrl" event={"ID":"9841bda3-b354-449f-ac31-65752f2735b8","Type":"ContainerDied","Data":"6cd90826cbfa1c9275a18621bfa293a9827512708c25b5ef58bd73d8e2ed3f1d"} Jan 29 12:34:37 crc kubenswrapper[4753]: I0129 12:34:37.073588 4753 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6cd90826cbfa1c9275a18621bfa293a9827512708c25b5ef58bd73d8e2ed3f1d" Jan 29 12:34:37 crc kubenswrapper[4753]: I0129 12:34:37.081504 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/barbicanc51a-account-delete-zxwrl" Jan 29 12:34:37 crc kubenswrapper[4753]: I0129 12:34:37.089446 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="swift-kuttl-tests/keystonedfb1-account-delete-t7d55" podStartSLOduration=2.089420175 podStartE2EDuration="2.089420175s" podCreationTimestamp="2026-01-29 12:34:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:34:37.085639827 +0000 UTC m=+1691.337721282" watchObservedRunningTime="2026-01-29 12:34:37.089420175 +0000 UTC m=+1691.341501630" Jan 29 12:34:37 crc kubenswrapper[4753]: I0129 12:34:37.497160 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v5ctn\" (UniqueName: \"kubernetes.io/projected/9841bda3-b354-449f-ac31-65752f2735b8-kube-api-access-v5ctn\") pod \"9841bda3-b354-449f-ac31-65752f2735b8\" (UID: \"9841bda3-b354-449f-ac31-65752f2735b8\") " Jan 29 12:34:37 crc kubenswrapper[4753]: I0129 12:34:37.497367 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9841bda3-b354-449f-ac31-65752f2735b8-operator-scripts\") pod \"9841bda3-b354-449f-ac31-65752f2735b8\" (UID: \"9841bda3-b354-449f-ac31-65752f2735b8\") " Jan 29 12:34:37 crc kubenswrapper[4753]: I0129 12:34:37.499147 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9841bda3-b354-449f-ac31-65752f2735b8-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9841bda3-b354-449f-ac31-65752f2735b8" (UID: "9841bda3-b354-449f-ac31-65752f2735b8"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:34:37 crc kubenswrapper[4753]: I0129 12:34:37.713310 4753 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9841bda3-b354-449f-ac31-65752f2735b8-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:37 crc kubenswrapper[4753]: I0129 12:34:37.729661 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9841bda3-b354-449f-ac31-65752f2735b8-kube-api-access-v5ctn" (OuterVolumeSpecName: "kube-api-access-v5ctn") pod "9841bda3-b354-449f-ac31-65752f2735b8" (UID: "9841bda3-b354-449f-ac31-65752f2735b8"). InnerVolumeSpecName "kube-api-access-v5ctn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:34:37 crc kubenswrapper[4753]: I0129 12:34:37.814212 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v5ctn\" (UniqueName: \"kubernetes.io/projected/9841bda3-b354-449f-ac31-65752f2735b8-kube-api-access-v5ctn\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:38 crc kubenswrapper[4753]: I0129 12:34:38.080836 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/barbicanc51a-account-delete-zxwrl" Jan 29 12:34:39 crc kubenswrapper[4753]: I0129 12:34:39.656149 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/keystone-66595c964d-6hnff" Jan 29 12:34:39 crc kubenswrapper[4753]: I0129 12:34:39.852482 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7832353e-7e00-44af-bd6a-9227ef58fd05-config-data\") pod \"7832353e-7e00-44af-bd6a-9227ef58fd05\" (UID: \"7832353e-7e00-44af-bd6a-9227ef58fd05\") " Jan 29 12:34:39 crc kubenswrapper[4753]: I0129 12:34:39.852890 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7832353e-7e00-44af-bd6a-9227ef58fd05-fernet-keys\") pod \"7832353e-7e00-44af-bd6a-9227ef58fd05\" (UID: \"7832353e-7e00-44af-bd6a-9227ef58fd05\") " Jan 29 12:34:39 crc kubenswrapper[4753]: I0129 12:34:39.852989 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d29j4\" (UniqueName: \"kubernetes.io/projected/7832353e-7e00-44af-bd6a-9227ef58fd05-kube-api-access-d29j4\") pod \"7832353e-7e00-44af-bd6a-9227ef58fd05\" (UID: \"7832353e-7e00-44af-bd6a-9227ef58fd05\") " Jan 29 12:34:39 crc kubenswrapper[4753]: I0129 12:34:39.853554 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/7832353e-7e00-44af-bd6a-9227ef58fd05-credential-keys\") pod \"7832353e-7e00-44af-bd6a-9227ef58fd05\" (UID: \"7832353e-7e00-44af-bd6a-9227ef58fd05\") " Jan 29 12:34:39 crc kubenswrapper[4753]: I0129 12:34:39.853716 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7832353e-7e00-44af-bd6a-9227ef58fd05-scripts\") pod \"7832353e-7e00-44af-bd6a-9227ef58fd05\" (UID: \"7832353e-7e00-44af-bd6a-9227ef58fd05\") " Jan 29 12:34:39 crc kubenswrapper[4753]: I0129 12:34:39.858391 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7832353e-7e00-44af-bd6a-9227ef58fd05-kube-api-access-d29j4" (OuterVolumeSpecName: "kube-api-access-d29j4") pod "7832353e-7e00-44af-bd6a-9227ef58fd05" (UID: "7832353e-7e00-44af-bd6a-9227ef58fd05"). InnerVolumeSpecName "kube-api-access-d29j4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:34:39 crc kubenswrapper[4753]: I0129 12:34:39.858725 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7832353e-7e00-44af-bd6a-9227ef58fd05-scripts" (OuterVolumeSpecName: "scripts") pod "7832353e-7e00-44af-bd6a-9227ef58fd05" (UID: "7832353e-7e00-44af-bd6a-9227ef58fd05"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:34:39 crc kubenswrapper[4753]: I0129 12:34:39.858797 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7832353e-7e00-44af-bd6a-9227ef58fd05-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "7832353e-7e00-44af-bd6a-9227ef58fd05" (UID: "7832353e-7e00-44af-bd6a-9227ef58fd05"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:34:39 crc kubenswrapper[4753]: I0129 12:34:39.862519 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7832353e-7e00-44af-bd6a-9227ef58fd05-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "7832353e-7e00-44af-bd6a-9227ef58fd05" (UID: "7832353e-7e00-44af-bd6a-9227ef58fd05"). InnerVolumeSpecName "fernet-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:34:39 crc kubenswrapper[4753]: I0129 12:34:39.876155 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7832353e-7e00-44af-bd6a-9227ef58fd05-config-data" (OuterVolumeSpecName: "config-data") pod "7832353e-7e00-44af-bd6a-9227ef58fd05" (UID: "7832353e-7e00-44af-bd6a-9227ef58fd05"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:34:39 crc kubenswrapper[4753]: I0129 12:34:39.955785 4753 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7832353e-7e00-44af-bd6a-9227ef58fd05-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:39 crc kubenswrapper[4753]: I0129 12:34:39.955825 4753 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/7832353e-7e00-44af-bd6a-9227ef58fd05-credential-keys\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:39 crc kubenswrapper[4753]: I0129 12:34:39.955838 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d29j4\" (UniqueName: \"kubernetes.io/projected/7832353e-7e00-44af-bd6a-9227ef58fd05-kube-api-access-d29j4\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:39 crc kubenswrapper[4753]: I0129 12:34:39.955848 4753 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7832353e-7e00-44af-bd6a-9227ef58fd05-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:39 crc kubenswrapper[4753]: I0129 12:34:39.955856 4753 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7832353e-7e00-44af-bd6a-9227ef58fd05-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:40 crc kubenswrapper[4753]: I0129 12:34:40.096095 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/root-account-create-update-h92qz"] Jan 29 12:34:40 crc kubenswrapper[4753]: E0129 12:34:40.096461 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9841bda3-b354-449f-ac31-65752f2735b8" containerName="mariadb-account-delete" Jan 29 12:34:40 crc kubenswrapper[4753]: I0129 12:34:40.096484 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="9841bda3-b354-449f-ac31-65752f2735b8" containerName="mariadb-account-delete" Jan 29 12:34:40 crc kubenswrapper[4753]: E0129 12:34:40.096514 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7832353e-7e00-44af-bd6a-9227ef58fd05" containerName="keystone-api" Jan 29 12:34:40 crc kubenswrapper[4753]: I0129 12:34:40.096520 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="7832353e-7e00-44af-bd6a-9227ef58fd05" containerName="keystone-api" Jan 29 12:34:40 crc kubenswrapper[4753]: I0129 12:34:40.096661 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="7832353e-7e00-44af-bd6a-9227ef58fd05" containerName="keystone-api" Jan 29 12:34:40 crc kubenswrapper[4753]: I0129 12:34:40.096695 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="9841bda3-b354-449f-ac31-65752f2735b8" containerName="mariadb-account-delete" Jan 29 12:34:40 crc kubenswrapper[4753]: I0129 12:34:40.097159 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/root-account-create-update-h92qz" Jan 29 12:34:40 crc kubenswrapper[4753]: I0129 12:34:40.099821 4753 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"openstack-mariadb-root-db-secret" Jan 29 12:34:40 crc kubenswrapper[4753]: I0129 12:34:40.099959 4753 generic.go:334] "Generic (PLEG): container finished" podID="7832353e-7e00-44af-bd6a-9227ef58fd05" containerID="56e14626896d6f1f29b27a6eed7fc588033ff40f6c37175ca5a67638ead7c7b0" exitCode=0 Jan 29 12:34:40 crc kubenswrapper[4753]: I0129 12:34:40.099994 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/keystone-66595c964d-6hnff" event={"ID":"7832353e-7e00-44af-bd6a-9227ef58fd05","Type":"ContainerDied","Data":"56e14626896d6f1f29b27a6eed7fc588033ff40f6c37175ca5a67638ead7c7b0"} Jan 29 12:34:40 crc kubenswrapper[4753]: I0129 12:34:40.100017 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/keystone-66595c964d-6hnff" event={"ID":"7832353e-7e00-44af-bd6a-9227ef58fd05","Type":"ContainerDied","Data":"b3a3769ab58213d819f63108d8a2310fcd2886e1cde5b8b7dd7e7967350ac5d8"} Jan 29 12:34:40 crc kubenswrapper[4753]: I0129 12:34:40.100034 4753 scope.go:117] "RemoveContainer" containerID="56e14626896d6f1f29b27a6eed7fc588033ff40f6c37175ca5a67638ead7c7b0" Jan 29 12:34:40 crc kubenswrapper[4753]: I0129 12:34:40.100162 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/keystone-66595c964d-6hnff" Jan 29 12:34:40 crc kubenswrapper[4753]: I0129 12:34:40.124426 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/root-account-create-update-h92qz"] Jan 29 12:34:40 crc kubenswrapper[4753]: I0129 12:34:40.131875 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/openstack-galera-1"] Jan 29 12:34:40 crc kubenswrapper[4753]: I0129 12:34:40.141892 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/openstack-galera-0"] Jan 29 12:34:40 crc kubenswrapper[4753]: I0129 12:34:40.149473 4753 scope.go:117] "RemoveContainer" containerID="56e14626896d6f1f29b27a6eed7fc588033ff40f6c37175ca5a67638ead7c7b0" Jan 29 12:34:40 crc kubenswrapper[4753]: E0129 12:34:40.150267 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"56e14626896d6f1f29b27a6eed7fc588033ff40f6c37175ca5a67638ead7c7b0\": container with ID starting with 56e14626896d6f1f29b27a6eed7fc588033ff40f6c37175ca5a67638ead7c7b0 not found: ID does not exist" containerID="56e14626896d6f1f29b27a6eed7fc588033ff40f6c37175ca5a67638ead7c7b0" Jan 29 12:34:40 crc kubenswrapper[4753]: I0129 12:34:40.150320 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"56e14626896d6f1f29b27a6eed7fc588033ff40f6c37175ca5a67638ead7c7b0"} err="failed to get container status \"56e14626896d6f1f29b27a6eed7fc588033ff40f6c37175ca5a67638ead7c7b0\": rpc error: code = NotFound desc = could not find container \"56e14626896d6f1f29b27a6eed7fc588033ff40f6c37175ca5a67638ead7c7b0\": container with ID starting with 56e14626896d6f1f29b27a6eed7fc588033ff40f6c37175ca5a67638ead7c7b0 not found: ID does not exist" Jan 29 12:34:40 crc kubenswrapper[4753]: I0129 12:34:40.158473 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/openstack-galera-2"] Jan 29 12:34:40 crc kubenswrapper[4753]: I0129 12:34:40.189095 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["swift-kuttl-tests/keystone-66595c964d-6hnff"] Jan 29 12:34:40 crc kubenswrapper[4753]: I0129 12:34:40.197678 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/keystone-66595c964d-6hnff"] Jan 29 12:34:40 crc kubenswrapper[4753]: I0129 12:34:40.207530 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/root-account-create-update-h92qz"] Jan 29 12:34:40 crc kubenswrapper[4753]: E0129 12:34:40.208147 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[kube-api-access-54rc7 operator-scripts], unattached volumes=[], failed to process volumes=[]: context canceled" pod="swift-kuttl-tests/root-account-create-update-h92qz" podUID="a5b1faff-ae0a-4e43-942a-c49c76bccfd2" Jan 29 12:34:40 crc kubenswrapper[4753]: I0129 12:34:40.224267 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/keystone-db-create-ll7kp"] Jan 29 12:34:40 crc kubenswrapper[4753]: I0129 12:34:40.237455 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/keystone-db-create-ll7kp"] Jan 29 12:34:40 crc kubenswrapper[4753]: I0129 12:34:40.249806 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/keystone-dfb1-account-create-update-sz78h"] Jan 29 12:34:40 crc kubenswrapper[4753]: I0129 12:34:40.254199 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/keystonedfb1-account-delete-t7d55"] Jan 29 12:34:40 crc kubenswrapper[4753]: I0129 12:34:40.254609 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/keystonedfb1-account-delete-t7d55" podUID="74538c68-72a6-44a4-a906-3cf0a864a392" containerName="mariadb-account-delete" containerID="cri-o://f01bf5128c8e96d8ceb312c4f1b44ad410f48f5c54623eead170d31ce0311c5a" gracePeriod=30 Jan 29 12:34:40 crc kubenswrapper[4753]: I0129 12:34:40.259761 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a5b1faff-ae0a-4e43-942a-c49c76bccfd2-operator-scripts\") pod \"root-account-create-update-h92qz\" (UID: \"a5b1faff-ae0a-4e43-942a-c49c76bccfd2\") " pod="swift-kuttl-tests/root-account-create-update-h92qz" Jan 29 12:34:40 crc kubenswrapper[4753]: I0129 12:34:40.259882 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-54rc7\" (UniqueName: \"kubernetes.io/projected/a5b1faff-ae0a-4e43-942a-c49c76bccfd2-kube-api-access-54rc7\") pod \"root-account-create-update-h92qz\" (UID: \"a5b1faff-ae0a-4e43-942a-c49c76bccfd2\") " pod="swift-kuttl-tests/root-account-create-update-h92qz" Jan 29 12:34:40 crc kubenswrapper[4753]: I0129 12:34:40.261438 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/keystone-dfb1-account-create-update-sz78h"] Jan 29 12:34:40 crc kubenswrapper[4753]: I0129 12:34:40.268321 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/barbican-db-create-wbnw7"] Jan 29 12:34:40 crc kubenswrapper[4753]: I0129 12:34:40.272027 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/barbican-db-create-wbnw7"] Jan 29 12:34:40 crc kubenswrapper[4753]: I0129 12:34:40.279598 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/barbican-c51a-account-create-update-w5dvf"] Jan 29 12:34:40 crc kubenswrapper[4753]: I0129 12:34:40.284069 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["swift-kuttl-tests/barbicanc51a-account-delete-zxwrl"] Jan 29 12:34:40 crc kubenswrapper[4753]: I0129 12:34:40.288115 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/barbican-c51a-account-create-update-w5dvf"] Jan 29 12:34:40 crc kubenswrapper[4753]: I0129 12:34:40.292172 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/barbicanc51a-account-delete-zxwrl"] Jan 29 12:34:40 crc kubenswrapper[4753]: I0129 12:34:40.328512 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/openstack-galera-2" podUID="cc8c9706-0165-4cc6-ad22-bc70f03d5bf9" containerName="galera" containerID="cri-o://a5d51765dc77046a849f5638d73d0b342a8786d872c3f6feb3a67d55f33d5ea1" gracePeriod=30 Jan 29 12:34:40 crc kubenswrapper[4753]: I0129 12:34:40.361319 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a5b1faff-ae0a-4e43-942a-c49c76bccfd2-operator-scripts\") pod \"root-account-create-update-h92qz\" (UID: \"a5b1faff-ae0a-4e43-942a-c49c76bccfd2\") " pod="swift-kuttl-tests/root-account-create-update-h92qz" Jan 29 12:34:40 crc kubenswrapper[4753]: I0129 12:34:40.361433 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-54rc7\" (UniqueName: \"kubernetes.io/projected/a5b1faff-ae0a-4e43-942a-c49c76bccfd2-kube-api-access-54rc7\") pod \"root-account-create-update-h92qz\" (UID: \"a5b1faff-ae0a-4e43-942a-c49c76bccfd2\") " pod="swift-kuttl-tests/root-account-create-update-h92qz" Jan 29 12:34:40 crc kubenswrapper[4753]: E0129 12:34:40.361934 4753 configmap.go:193] Couldn't get configMap swift-kuttl-tests/openstack-scripts: configmap "openstack-scripts" not found Jan 29 12:34:40 crc kubenswrapper[4753]: E0129 12:34:40.362007 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/a5b1faff-ae0a-4e43-942a-c49c76bccfd2-operator-scripts podName:a5b1faff-ae0a-4e43-942a-c49c76bccfd2 nodeName:}" failed. No retries permitted until 2026-01-29 12:34:40.861973282 +0000 UTC m=+1695.114054727 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/a5b1faff-ae0a-4e43-942a-c49c76bccfd2-operator-scripts") pod "root-account-create-update-h92qz" (UID: "a5b1faff-ae0a-4e43-942a-c49c76bccfd2") : configmap "openstack-scripts" not found Jan 29 12:34:40 crc kubenswrapper[4753]: E0129 12:34:40.367102 4753 projected.go:194] Error preparing data for projected volume kube-api-access-54rc7 for pod swift-kuttl-tests/root-account-create-update-h92qz: failed to fetch token: serviceaccounts "galera-openstack" not found Jan 29 12:34:40 crc kubenswrapper[4753]: E0129 12:34:40.367218 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/a5b1faff-ae0a-4e43-942a-c49c76bccfd2-kube-api-access-54rc7 podName:a5b1faff-ae0a-4e43-942a-c49c76bccfd2 nodeName:}" failed. No retries permitted until 2026-01-29 12:34:40.867191602 +0000 UTC m=+1695.119273067 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-54rc7" (UniqueName: "kubernetes.io/projected/a5b1faff-ae0a-4e43-942a-c49c76bccfd2-kube-api-access-54rc7") pod "root-account-create-update-h92qz" (UID: "a5b1faff-ae0a-4e43-942a-c49c76bccfd2") : failed to fetch token: serviceaccounts "galera-openstack" not found Jan 29 12:34:40 crc kubenswrapper[4753]: I0129 12:34:40.869052 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-54rc7\" (UniqueName: \"kubernetes.io/projected/a5b1faff-ae0a-4e43-942a-c49c76bccfd2-kube-api-access-54rc7\") pod \"root-account-create-update-h92qz\" (UID: \"a5b1faff-ae0a-4e43-942a-c49c76bccfd2\") " pod="swift-kuttl-tests/root-account-create-update-h92qz" Jan 29 12:34:40 crc kubenswrapper[4753]: I0129 12:34:40.870077 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a5b1faff-ae0a-4e43-942a-c49c76bccfd2-operator-scripts\") pod \"root-account-create-update-h92qz\" (UID: \"a5b1faff-ae0a-4e43-942a-c49c76bccfd2\") " pod="swift-kuttl-tests/root-account-create-update-h92qz" Jan 29 12:34:40 crc kubenswrapper[4753]: E0129 12:34:40.870223 4753 configmap.go:193] Couldn't get configMap swift-kuttl-tests/openstack-scripts: configmap "openstack-scripts" not found Jan 29 12:34:40 crc kubenswrapper[4753]: E0129 12:34:40.870348 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/a5b1faff-ae0a-4e43-942a-c49c76bccfd2-operator-scripts podName:a5b1faff-ae0a-4e43-942a-c49c76bccfd2 nodeName:}" failed. No retries permitted until 2026-01-29 12:34:41.870327027 +0000 UTC m=+1696.122408472 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/a5b1faff-ae0a-4e43-942a-c49c76bccfd2-operator-scripts") pod "root-account-create-update-h92qz" (UID: "a5b1faff-ae0a-4e43-942a-c49c76bccfd2") : configmap "openstack-scripts" not found Jan 29 12:34:40 crc kubenswrapper[4753]: E0129 12:34:40.876379 4753 projected.go:194] Error preparing data for projected volume kube-api-access-54rc7 for pod swift-kuttl-tests/root-account-create-update-h92qz: failed to fetch token: serviceaccounts "galera-openstack" not found Jan 29 12:34:40 crc kubenswrapper[4753]: E0129 12:34:40.876474 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/a5b1faff-ae0a-4e43-942a-c49c76bccfd2-kube-api-access-54rc7 podName:a5b1faff-ae0a-4e43-942a-c49c76bccfd2 nodeName:}" failed. No retries permitted until 2026-01-29 12:34:41.876451913 +0000 UTC m=+1696.128533368 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-54rc7" (UniqueName: "kubernetes.io/projected/a5b1faff-ae0a-4e43-942a-c49c76bccfd2-kube-api-access-54rc7") pod "root-account-create-update-h92qz" (UID: "a5b1faff-ae0a-4e43-942a-c49c76bccfd2") : failed to fetch token: serviceaccounts "galera-openstack" not found Jan 29 12:34:40 crc kubenswrapper[4753]: I0129 12:34:40.889418 4753 scope.go:117] "RemoveContainer" containerID="00211d2562b6632acb04decda9045cbb8d3216adca69cd5bc1380ec42b8211e0" Jan 29 12:34:40 crc kubenswrapper[4753]: E0129 12:34:40.889750 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7c24x_openshift-machine-config-operator(b0310995-a7c7-47c3-ae6c-05daaaba92a6)\"" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" Jan 29 12:34:41 crc kubenswrapper[4753]: I0129 12:34:41.023755 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/memcached-0"] Jan 29 12:34:41 crc kubenswrapper[4753]: I0129 12:34:41.024009 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/memcached-0" podUID="bd1f7149-df12-4c02-9585-238bfd26f12d" containerName="memcached" containerID="cri-o://7e2563e7ba256c98d58bb645a318e00f67fdfe4d6d9a7c0aad6f2e2e9bb5165c" gracePeriod=30 Jan 29 12:34:41 crc kubenswrapper[4753]: I0129 12:34:41.108946 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/root-account-create-update-h92qz" Jan 29 12:34:41 crc kubenswrapper[4753]: I0129 12:34:41.119609 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/root-account-create-update-h92qz" Jan 29 12:34:41 crc kubenswrapper[4753]: I0129 12:34:41.512890 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/rabbitmq-server-0"] Jan 29 12:34:41 crc kubenswrapper[4753]: I0129 12:34:41.885855 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a5b1faff-ae0a-4e43-942a-c49c76bccfd2-operator-scripts\") pod \"root-account-create-update-h92qz\" (UID: \"a5b1faff-ae0a-4e43-942a-c49c76bccfd2\") " pod="swift-kuttl-tests/root-account-create-update-h92qz" Jan 29 12:34:41 crc kubenswrapper[4753]: E0129 12:34:41.886254 4753 configmap.go:193] Couldn't get configMap swift-kuttl-tests/openstack-scripts: configmap "openstack-scripts" not found Jan 29 12:34:41 crc kubenswrapper[4753]: I0129 12:34:41.886323 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-54rc7\" (UniqueName: \"kubernetes.io/projected/a5b1faff-ae0a-4e43-942a-c49c76bccfd2-kube-api-access-54rc7\") pod \"root-account-create-update-h92qz\" (UID: \"a5b1faff-ae0a-4e43-942a-c49c76bccfd2\") " pod="swift-kuttl-tests/root-account-create-update-h92qz" Jan 29 12:34:41 crc kubenswrapper[4753]: E0129 12:34:41.886378 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/a5b1faff-ae0a-4e43-942a-c49c76bccfd2-operator-scripts podName:a5b1faff-ae0a-4e43-942a-c49c76bccfd2 nodeName:}" failed. No retries permitted until 2026-01-29 12:34:43.886336385 +0000 UTC m=+1698.138417880 (durationBeforeRetry 2s). 
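[Annotation] Note the retry spacing in the failed mounts above: durationBeforeRetry grows from 500ms to 1s to 2s, i.e. the kubelet doubles the delay after each failed MountVolume.SetUp rather than retrying immediately. A small sketch of that doubling backoff, assuming a hypothetical mountVolume stub; the cap value is illustrative, since the log never runs long enough to show one:

    // Illustrative sketch only, assuming the doubling policy visible above
    // (500ms, 1s, 2s, ...). mountVolume and maxBackoff are stand-ins, not
    // kubelet code.
    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    func mountVolume() error {
        // Fails the same way the log does until the API object reappears.
        return errors.New(`configmap "openstack-scripts" not found`)
    }

    func main() {
        backoff := 500 * time.Millisecond // initial delay seen in the log
        const maxBackoff = 2 * time.Minute // assumed cap, for illustration

        for attempt := 1; attempt <= 4; attempt++ {
            err := mountVolume()
            if err == nil {
                fmt.Println("mounted")
                return
            }
            fmt.Printf("attempt %d failed (%v); no retries permitted for %s\n",
                attempt, err, backoff)
            time.Sleep(backoff)
            backoff *= 2 // doubling matches 500ms -> 1s -> 2s above
            if backoff > maxBackoff {
                backoff = maxBackoff
            }
        }
    }

Because these failures come from a missing configmap and a missing service account, no amount of retrying inside the kubelet can succeed until the API objects exist again; the backoff only bounds how much work is wasted in the meantime.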
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/a5b1faff-ae0a-4e43-942a-c49c76bccfd2-operator-scripts") pod "root-account-create-update-h92qz" (UID: "a5b1faff-ae0a-4e43-942a-c49c76bccfd2") : configmap "openstack-scripts" not found Jan 29 12:34:41 crc kubenswrapper[4753]: E0129 12:34:41.893259 4753 projected.go:194] Error preparing data for projected volume kube-api-access-54rc7 for pod swift-kuttl-tests/root-account-create-update-h92qz: failed to fetch token: serviceaccounts "galera-openstack" not found Jan 29 12:34:41 crc kubenswrapper[4753]: E0129 12:34:41.893363 4753 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/a5b1faff-ae0a-4e43-942a-c49c76bccfd2-kube-api-access-54rc7 podName:a5b1faff-ae0a-4e43-942a-c49c76bccfd2 nodeName:}" failed. No retries permitted until 2026-01-29 12:34:43.893340347 +0000 UTC m=+1698.145421812 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-54rc7" (UniqueName: "kubernetes.io/projected/a5b1faff-ae0a-4e43-942a-c49c76bccfd2-kube-api-access-54rc7") pod "root-account-create-update-h92qz" (UID: "a5b1faff-ae0a-4e43-942a-c49c76bccfd2") : failed to fetch token: serviceaccounts "galera-openstack" not found Jan 29 12:34:41 crc kubenswrapper[4753]: I0129 12:34:41.914297 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="078e56f6-dfd4-4ca7-81c4-2d4a5efd44a3" path="/var/lib/kubelet/pods/078e56f6-dfd4-4ca7-81c4-2d4a5efd44a3/volumes" Jan 29 12:34:41 crc kubenswrapper[4753]: I0129 12:34:41.914914 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5db8a16d-47c1-4e9c-870d-a8d6490202e8" path="/var/lib/kubelet/pods/5db8a16d-47c1-4e9c-870d-a8d6490202e8/volumes" Jan 29 12:34:41 crc kubenswrapper[4753]: I0129 12:34:41.916653 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7832353e-7e00-44af-bd6a-9227ef58fd05" path="/var/lib/kubelet/pods/7832353e-7e00-44af-bd6a-9227ef58fd05/volumes" Jan 29 12:34:41 crc kubenswrapper[4753]: I0129 12:34:41.917417 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="924e55f9-5adb-46df-88dc-6a3ffcef9fba" path="/var/lib/kubelet/pods/924e55f9-5adb-46df-88dc-6a3ffcef9fba/volumes" Jan 29 12:34:41 crc kubenswrapper[4753]: I0129 12:34:41.918653 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9841bda3-b354-449f-ac31-65752f2735b8" path="/var/lib/kubelet/pods/9841bda3-b354-449f-ac31-65752f2735b8/volumes" Jan 29 12:34:41 crc kubenswrapper[4753]: I0129 12:34:41.919320 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d138c317-92d8-4dc4-ac16-87975335e9fb" path="/var/lib/kubelet/pods/d138c317-92d8-4dc4-ac16-87975335e9fb/volumes" Jan 29 12:34:41 crc kubenswrapper[4753]: I0129 12:34:41.941147 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/rabbitmq-server-0"] Jan 29 12:34:41 crc kubenswrapper[4753]: I0129 12:34:41.979415 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/openstack-galera-2" Jan 29 12:34:42 crc kubenswrapper[4753]: I0129 12:34:42.089338 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/cc8c9706-0165-4cc6-ad22-bc70f03d5bf9-kolla-config\") pod \"cc8c9706-0165-4cc6-ad22-bc70f03d5bf9\" (UID: \"cc8c9706-0165-4cc6-ad22-bc70f03d5bf9\") " Jan 29 12:34:42 crc kubenswrapper[4753]: I0129 12:34:42.089685 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q4dzf\" (UniqueName: \"kubernetes.io/projected/cc8c9706-0165-4cc6-ad22-bc70f03d5bf9-kube-api-access-q4dzf\") pod \"cc8c9706-0165-4cc6-ad22-bc70f03d5bf9\" (UID: \"cc8c9706-0165-4cc6-ad22-bc70f03d5bf9\") " Jan 29 12:34:42 crc kubenswrapper[4753]: I0129 12:34:42.089935 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cc8c9706-0165-4cc6-ad22-bc70f03d5bf9-operator-scripts\") pod \"cc8c9706-0165-4cc6-ad22-bc70f03d5bf9\" (UID: \"cc8c9706-0165-4cc6-ad22-bc70f03d5bf9\") " Jan 29 12:34:42 crc kubenswrapper[4753]: I0129 12:34:42.090051 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mysql-db\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"cc8c9706-0165-4cc6-ad22-bc70f03d5bf9\" (UID: \"cc8c9706-0165-4cc6-ad22-bc70f03d5bf9\") " Jan 29 12:34:42 crc kubenswrapper[4753]: I0129 12:34:42.090143 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/cc8c9706-0165-4cc6-ad22-bc70f03d5bf9-config-data-generated\") pod \"cc8c9706-0165-4cc6-ad22-bc70f03d5bf9\" (UID: \"cc8c9706-0165-4cc6-ad22-bc70f03d5bf9\") " Jan 29 12:34:42 crc kubenswrapper[4753]: I0129 12:34:42.090217 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/cc8c9706-0165-4cc6-ad22-bc70f03d5bf9-config-data-default\") pod \"cc8c9706-0165-4cc6-ad22-bc70f03d5bf9\" (UID: \"cc8c9706-0165-4cc6-ad22-bc70f03d5bf9\") " Jan 29 12:34:42 crc kubenswrapper[4753]: I0129 12:34:42.091019 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cc8c9706-0165-4cc6-ad22-bc70f03d5bf9-config-data-default" (OuterVolumeSpecName: "config-data-default") pod "cc8c9706-0165-4cc6-ad22-bc70f03d5bf9" (UID: "cc8c9706-0165-4cc6-ad22-bc70f03d5bf9"). InnerVolumeSpecName "config-data-default". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:34:42 crc kubenswrapper[4753]: I0129 12:34:42.091546 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cc8c9706-0165-4cc6-ad22-bc70f03d5bf9-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "cc8c9706-0165-4cc6-ad22-bc70f03d5bf9" (UID: "cc8c9706-0165-4cc6-ad22-bc70f03d5bf9"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:34:42 crc kubenswrapper[4753]: I0129 12:34:42.103046 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cc8c9706-0165-4cc6-ad22-bc70f03d5bf9-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "cc8c9706-0165-4cc6-ad22-bc70f03d5bf9" (UID: "cc8c9706-0165-4cc6-ad22-bc70f03d5bf9"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:34:42 crc kubenswrapper[4753]: I0129 12:34:42.103871 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cc8c9706-0165-4cc6-ad22-bc70f03d5bf9-config-data-generated" (OuterVolumeSpecName: "config-data-generated") pod "cc8c9706-0165-4cc6-ad22-bc70f03d5bf9" (UID: "cc8c9706-0165-4cc6-ad22-bc70f03d5bf9"). InnerVolumeSpecName "config-data-generated". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:34:42 crc kubenswrapper[4753]: I0129 12:34:42.118586 4753 generic.go:334] "Generic (PLEG): container finished" podID="cc8c9706-0165-4cc6-ad22-bc70f03d5bf9" containerID="a5d51765dc77046a849f5638d73d0b342a8786d872c3f6feb3a67d55f33d5ea1" exitCode=0 Jan 29 12:34:42 crc kubenswrapper[4753]: I0129 12:34:42.118998 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/openstack-galera-2" Jan 29 12:34:42 crc kubenswrapper[4753]: I0129 12:34:42.119373 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/openstack-galera-2" event={"ID":"cc8c9706-0165-4cc6-ad22-bc70f03d5bf9","Type":"ContainerDied","Data":"a5d51765dc77046a849f5638d73d0b342a8786d872c3f6feb3a67d55f33d5ea1"} Jan 29 12:34:42 crc kubenswrapper[4753]: I0129 12:34:42.119410 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/openstack-galera-2" event={"ID":"cc8c9706-0165-4cc6-ad22-bc70f03d5bf9","Type":"ContainerDied","Data":"e9712456d995f396f56aa44cf6d5fcb616e59dc82ef5ecc85e33b86cb121dcfd"} Jan 29 12:34:42 crc kubenswrapper[4753]: I0129 12:34:42.119427 4753 scope.go:117] "RemoveContainer" containerID="a5d51765dc77046a849f5638d73d0b342a8786d872c3f6feb3a67d55f33d5ea1" Jan 29 12:34:42 crc kubenswrapper[4753]: I0129 12:34:42.119511 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/root-account-create-update-h92qz" Jan 29 12:34:42 crc kubenswrapper[4753]: I0129 12:34:42.164772 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/rabbitmq-server-0" podUID="e0033592-f961-4066-9026-3645b09f9524" containerName="rabbitmq" containerID="cri-o://108d86555b06dd78575168c0015609068de2878c9d5b7f8b53f63cb72ed39786" gracePeriod=604800 Jan 29 12:34:42 crc kubenswrapper[4753]: I0129 12:34:42.913082 4753 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/cc8c9706-0165-4cc6-ad22-bc70f03d5bf9-kolla-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:42 crc kubenswrapper[4753]: I0129 12:34:42.913117 4753 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cc8c9706-0165-4cc6-ad22-bc70f03d5bf9-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:42 crc kubenswrapper[4753]: I0129 12:34:42.913131 4753 reconciler_common.go:293] "Volume detached for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/cc8c9706-0165-4cc6-ad22-bc70f03d5bf9-config-data-default\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:42 crc kubenswrapper[4753]: I0129 12:34:42.913145 4753 reconciler_common.go:293] "Volume detached for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/cc8c9706-0165-4cc6-ad22-bc70f03d5bf9-config-data-generated\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:42 crc kubenswrapper[4753]: I0129 12:34:42.923542 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage03-crc" (OuterVolumeSpecName: "mysql-db") pod "cc8c9706-0165-4cc6-ad22-bc70f03d5bf9" (UID: "cc8c9706-0165-4cc6-ad22-bc70f03d5bf9"). InnerVolumeSpecName "local-storage03-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 29 12:34:42 crc kubenswrapper[4753]: I0129 12:34:42.924113 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc8c9706-0165-4cc6-ad22-bc70f03d5bf9-kube-api-access-q4dzf" (OuterVolumeSpecName: "kube-api-access-q4dzf") pod "cc8c9706-0165-4cc6-ad22-bc70f03d5bf9" (UID: "cc8c9706-0165-4cc6-ad22-bc70f03d5bf9"). InnerVolumeSpecName "kube-api-access-q4dzf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:34:42 crc kubenswrapper[4753]: I0129 12:34:42.973764 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/openstack-galera-1" podUID="89e38fc2-9f07-45f0-8cdc-b77931872d7b" containerName="galera" containerID="cri-o://fd2d4883cbb5e061192a0055cb839eaec1e2e1653c1b4339edbd2330756bbef4" gracePeriod=28 Jan 29 12:34:43 crc kubenswrapper[4753]: I0129 12:34:43.014420 4753 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" " Jan 29 12:34:43 crc kubenswrapper[4753]: I0129 12:34:43.014470 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q4dzf\" (UniqueName: \"kubernetes.io/projected/cc8c9706-0165-4cc6-ad22-bc70f03d5bf9-kube-api-access-q4dzf\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:43 crc kubenswrapper[4753]: I0129 12:34:43.028765 4753 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage03-crc" (UniqueName: "kubernetes.io/local-volume/local-storage03-crc") on node "crc" Jan 29 12:34:43 crc kubenswrapper[4753]: I0129 12:34:43.083913 4753 scope.go:117] "RemoveContainer" containerID="9e7761deaabf5405004d0a17c8a33805dd7077c52d15bba22c2ae2e917b92fc0" Jan 29 12:34:43 crc kubenswrapper[4753]: I0129 12:34:43.085097 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/root-account-create-update-h92qz"] Jan 29 12:34:43 crc kubenswrapper[4753]: I0129 12:34:43.093489 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/root-account-create-update-h92qz"] Jan 29 12:34:43 crc kubenswrapper[4753]: I0129 12:34:43.107971 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/openstack-galera-2"] Jan 29 12:34:43 crc kubenswrapper[4753]: I0129 12:34:43.116870 4753 reconciler_common.go:293] "Volume detached for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:43 crc kubenswrapper[4753]: I0129 12:34:43.116972 4753 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a5b1faff-ae0a-4e43-942a-c49c76bccfd2-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:43 crc kubenswrapper[4753]: I0129 12:34:43.117097 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-54rc7\" (UniqueName: \"kubernetes.io/projected/a5b1faff-ae0a-4e43-942a-c49c76bccfd2-kube-api-access-54rc7\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:43 crc kubenswrapper[4753]: I0129 12:34:43.121956 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/openstack-galera-2"] Jan 29 12:34:43 crc kubenswrapper[4753]: I0129 12:34:43.127044 4753 scope.go:117] "RemoveContainer" containerID="a5d51765dc77046a849f5638d73d0b342a8786d872c3f6feb3a67d55f33d5ea1" Jan 29 12:34:43 crc kubenswrapper[4753]: E0129 12:34:43.128833 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a5d51765dc77046a849f5638d73d0b342a8786d872c3f6feb3a67d55f33d5ea1\": container with ID starting with a5d51765dc77046a849f5638d73d0b342a8786d872c3f6feb3a67d55f33d5ea1 not found: ID does not exist" containerID="a5d51765dc77046a849f5638d73d0b342a8786d872c3f6feb3a67d55f33d5ea1" Jan 29 12:34:43 crc kubenswrapper[4753]: I0129 12:34:43.128876 4753 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a5d51765dc77046a849f5638d73d0b342a8786d872c3f6feb3a67d55f33d5ea1"} err="failed to get container status \"a5d51765dc77046a849f5638d73d0b342a8786d872c3f6feb3a67d55f33d5ea1\": rpc error: code = NotFound desc = could not find container \"a5d51765dc77046a849f5638d73d0b342a8786d872c3f6feb3a67d55f33d5ea1\": container with ID starting with a5d51765dc77046a849f5638d73d0b342a8786d872c3f6feb3a67d55f33d5ea1 not found: ID does not exist" Jan 29 12:34:43 crc kubenswrapper[4753]: I0129 12:34:43.128901 4753 scope.go:117] "RemoveContainer" containerID="9e7761deaabf5405004d0a17c8a33805dd7077c52d15bba22c2ae2e917b92fc0" Jan 29 12:34:43 crc kubenswrapper[4753]: E0129 12:34:43.131347 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9e7761deaabf5405004d0a17c8a33805dd7077c52d15bba22c2ae2e917b92fc0\": container with ID starting with 9e7761deaabf5405004d0a17c8a33805dd7077c52d15bba22c2ae2e917b92fc0 not found: ID does not exist" containerID="9e7761deaabf5405004d0a17c8a33805dd7077c52d15bba22c2ae2e917b92fc0" Jan 29 12:34:43 crc kubenswrapper[4753]: I0129 12:34:43.131439 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9e7761deaabf5405004d0a17c8a33805dd7077c52d15bba22c2ae2e917b92fc0"} err="failed to get container status \"9e7761deaabf5405004d0a17c8a33805dd7077c52d15bba22c2ae2e917b92fc0\": rpc error: code = NotFound desc = could not find container \"9e7761deaabf5405004d0a17c8a33805dd7077c52d15bba22c2ae2e917b92fc0\": container with ID starting with 9e7761deaabf5405004d0a17c8a33805dd7077c52d15bba22c2ae2e917b92fc0 not found: ID does not exist" Jan 29 12:34:43 crc kubenswrapper[4753]: I0129 12:34:43.142368 4753 generic.go:334] "Generic (PLEG): container finished" podID="bd1f7149-df12-4c02-9585-238bfd26f12d" containerID="7e2563e7ba256c98d58bb645a318e00f67fdfe4d6d9a7c0aad6f2e2e9bb5165c" exitCode=0 Jan 29 12:34:43 crc kubenswrapper[4753]: I0129 12:34:43.142419 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/memcached-0" event={"ID":"bd1f7149-df12-4c02-9585-238bfd26f12d","Type":"ContainerDied","Data":"7e2563e7ba256c98d58bb645a318e00f67fdfe4d6d9a7c0aad6f2e2e9bb5165c"} Jan 29 12:34:43 crc kubenswrapper[4753]: I0129 12:34:43.796267 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/memcached-0" Jan 29 12:34:43 crc kubenswrapper[4753]: I0129 12:34:43.842867 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zfhlb\" (UniqueName: \"kubernetes.io/projected/bd1f7149-df12-4c02-9585-238bfd26f12d-kube-api-access-zfhlb\") pod \"bd1f7149-df12-4c02-9585-238bfd26f12d\" (UID: \"bd1f7149-df12-4c02-9585-238bfd26f12d\") " Jan 29 12:34:43 crc kubenswrapper[4753]: I0129 12:34:43.842955 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/bd1f7149-df12-4c02-9585-238bfd26f12d-config-data\") pod \"bd1f7149-df12-4c02-9585-238bfd26f12d\" (UID: \"bd1f7149-df12-4c02-9585-238bfd26f12d\") " Jan 29 12:34:43 crc kubenswrapper[4753]: I0129 12:34:43.843030 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/bd1f7149-df12-4c02-9585-238bfd26f12d-kolla-config\") pod \"bd1f7149-df12-4c02-9585-238bfd26f12d\" (UID: \"bd1f7149-df12-4c02-9585-238bfd26f12d\") " Jan 29 12:34:43 crc kubenswrapper[4753]: I0129 12:34:43.844200 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bd1f7149-df12-4c02-9585-238bfd26f12d-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "bd1f7149-df12-4c02-9585-238bfd26f12d" (UID: "bd1f7149-df12-4c02-9585-238bfd26f12d"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:34:43 crc kubenswrapper[4753]: I0129 12:34:43.844382 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bd1f7149-df12-4c02-9585-238bfd26f12d-config-data" (OuterVolumeSpecName: "config-data") pod "bd1f7149-df12-4c02-9585-238bfd26f12d" (UID: "bd1f7149-df12-4c02-9585-238bfd26f12d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:34:43 crc kubenswrapper[4753]: I0129 12:34:43.848526 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd1f7149-df12-4c02-9585-238bfd26f12d-kube-api-access-zfhlb" (OuterVolumeSpecName: "kube-api-access-zfhlb") pod "bd1f7149-df12-4c02-9585-238bfd26f12d" (UID: "bd1f7149-df12-4c02-9585-238bfd26f12d"). InnerVolumeSpecName "kube-api-access-zfhlb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:34:43 crc kubenswrapper[4753]: I0129 12:34:43.898403 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a5b1faff-ae0a-4e43-942a-c49c76bccfd2" path="/var/lib/kubelet/pods/a5b1faff-ae0a-4e43-942a-c49c76bccfd2/volumes" Jan 29 12:34:43 crc kubenswrapper[4753]: I0129 12:34:43.899087 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cc8c9706-0165-4cc6-ad22-bc70f03d5bf9" path="/var/lib/kubelet/pods/cc8c9706-0165-4cc6-ad22-bc70f03d5bf9/volumes" Jan 29 12:34:43 crc kubenswrapper[4753]: I0129 12:34:43.939649 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/swift-operator-controller-manager-f8bdff7d9-wjc8b"] Jan 29 12:34:43 crc kubenswrapper[4753]: I0129 12:34:43.939943 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/swift-operator-controller-manager-f8bdff7d9-wjc8b" podUID="f49f1749-fe9d-4a18-b81c-65ab628d882e" containerName="manager" containerID="cri-o://ec4fcea80b360bf897b1acf454663f063df4e89ac871de1e919e9cbb26ef50b8" gracePeriod=10 Jan 29 12:34:43 crc kubenswrapper[4753]: I0129 12:34:43.946674 4753 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/bd1f7149-df12-4c02-9585-238bfd26f12d-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:43 crc kubenswrapper[4753]: I0129 12:34:43.946703 4753 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/bd1f7149-df12-4c02-9585-238bfd26f12d-kolla-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:43 crc kubenswrapper[4753]: I0129 12:34:43.946712 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zfhlb\" (UniqueName: \"kubernetes.io/projected/bd1f7149-df12-4c02-9585-238bfd26f12d-kube-api-access-zfhlb\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:44 crc kubenswrapper[4753]: I0129 12:34:44.168139 4753 generic.go:334] "Generic (PLEG): container finished" podID="f49f1749-fe9d-4a18-b81c-65ab628d882e" containerID="ec4fcea80b360bf897b1acf454663f063df4e89ac871de1e919e9cbb26ef50b8" exitCode=0 Jan 29 12:34:44 crc kubenswrapper[4753]: I0129 12:34:44.168545 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-f8bdff7d9-wjc8b" event={"ID":"f49f1749-fe9d-4a18-b81c-65ab628d882e","Type":"ContainerDied","Data":"ec4fcea80b360bf897b1acf454663f063df4e89ac871de1e919e9cbb26ef50b8"} Jan 29 12:34:44 crc kubenswrapper[4753]: I0129 12:34:44.177789 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/memcached-0" event={"ID":"bd1f7149-df12-4c02-9585-238bfd26f12d","Type":"ContainerDied","Data":"45784504d0eb5f699a70326b32b71dc770138ec4ab5273af9162b72ee4adf912"} Jan 29 12:34:44 crc kubenswrapper[4753]: I0129 12:34:44.178101 4753 scope.go:117] "RemoveContainer" containerID="7e2563e7ba256c98d58bb645a318e00f67fdfe4d6d9a7c0aad6f2e2e9bb5165c" Jan 29 12:34:44 crc kubenswrapper[4753]: I0129 12:34:44.178169 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/memcached-0" Jan 29 12:34:44 crc kubenswrapper[4753]: I0129 12:34:44.203972 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/swift-operator-index-g5k7d"] Jan 29 12:34:44 crc kubenswrapper[4753]: I0129 12:34:44.204211 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/swift-operator-index-g5k7d" podUID="01f1fcff-ebfe-4f44-9ed6-ea5308d4f2b6" containerName="registry-server" containerID="cri-o://82e9d7a70f0b4833f80f4d86dd3202dbe66cf538ebb9baf0f943b314054e5c86" gracePeriod=30 Jan 29 12:34:44 crc kubenswrapper[4753]: I0129 12:34:44.255073 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/7c9c431936913d3e6ade54f671817cf9e499411b73d2ff35ef387edf83d52zw"] Jan 29 12:34:44 crc kubenswrapper[4753]: I0129 12:34:44.280721 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/7c9c431936913d3e6ade54f671817cf9e499411b73d2ff35ef387edf83d52zw"] Jan 29 12:34:44 crc kubenswrapper[4753]: I0129 12:34:44.294325 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/memcached-0"] Jan 29 12:34:44 crc kubenswrapper[4753]: I0129 12:34:44.323327 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/memcached-0"] Jan 29 12:34:44 crc kubenswrapper[4753]: I0129 12:34:44.398509 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/openstack-galera-0" podUID="6762f214-cfd1-4314-9fa0-1d2b40e87b4e" containerName="galera" containerID="cri-o://955c359e87ea643dd73d843cc3480910edf3ef440c787c82d01331b918f7cb58" gracePeriod=26 Jan 29 12:34:44 crc kubenswrapper[4753]: I0129 12:34:44.590200 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-f8bdff7d9-wjc8b" Jan 29 12:34:44 crc kubenswrapper[4753]: I0129 12:34:44.765827 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f49f1749-fe9d-4a18-b81c-65ab628d882e-apiservice-cert\") pod \"f49f1749-fe9d-4a18-b81c-65ab628d882e\" (UID: \"f49f1749-fe9d-4a18-b81c-65ab628d882e\") " Jan 29 12:34:44 crc kubenswrapper[4753]: I0129 12:34:44.766207 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f49f1749-fe9d-4a18-b81c-65ab628d882e-webhook-cert\") pod \"f49f1749-fe9d-4a18-b81c-65ab628d882e\" (UID: \"f49f1749-fe9d-4a18-b81c-65ab628d882e\") " Jan 29 12:34:44 crc kubenswrapper[4753]: I0129 12:34:44.766410 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfpfv\" (UniqueName: \"kubernetes.io/projected/f49f1749-fe9d-4a18-b81c-65ab628d882e-kube-api-access-cfpfv\") pod \"f49f1749-fe9d-4a18-b81c-65ab628d882e\" (UID: \"f49f1749-fe9d-4a18-b81c-65ab628d882e\") " Jan 29 12:34:44 crc kubenswrapper[4753]: I0129 12:34:44.782609 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f49f1749-fe9d-4a18-b81c-65ab628d882e-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "f49f1749-fe9d-4a18-b81c-65ab628d882e" (UID: "f49f1749-fe9d-4a18-b81c-65ab628d882e"). InnerVolumeSpecName "webhook-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:34:44 crc kubenswrapper[4753]: I0129 12:34:44.783268 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f49f1749-fe9d-4a18-b81c-65ab628d882e-kube-api-access-cfpfv" (OuterVolumeSpecName: "kube-api-access-cfpfv") pod "f49f1749-fe9d-4a18-b81c-65ab628d882e" (UID: "f49f1749-fe9d-4a18-b81c-65ab628d882e"). InnerVolumeSpecName "kube-api-access-cfpfv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:34:44 crc kubenswrapper[4753]: I0129 12:34:44.785464 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f49f1749-fe9d-4a18-b81c-65ab628d882e-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "f49f1749-fe9d-4a18-b81c-65ab628d882e" (UID: "f49f1749-fe9d-4a18-b81c-65ab628d882e"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:34:44 crc kubenswrapper[4753]: I0129 12:34:44.868813 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfpfv\" (UniqueName: \"kubernetes.io/projected/f49f1749-fe9d-4a18-b81c-65ab628d882e-kube-api-access-cfpfv\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:44 crc kubenswrapper[4753]: I0129 12:34:44.868849 4753 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f49f1749-fe9d-4a18-b81c-65ab628d882e-apiservice-cert\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:44 crc kubenswrapper[4753]: I0129 12:34:44.868860 4753 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f49f1749-fe9d-4a18-b81c-65ab628d882e-webhook-cert\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:44 crc kubenswrapper[4753]: I0129 12:34:44.923629 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/rabbitmq-server-0" Jan 29 12:34:44 crc kubenswrapper[4753]: I0129 12:34:44.932553 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/swift-operator-index-g5k7d" Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.071418 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-85f927ae-4eda-4b12-9ccc-0a8b9a462f5d\") pod \"e0033592-f961-4066-9026-3645b09f9524\" (UID: \"e0033592-f961-4066-9026-3645b09f9524\") " Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.073344 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/e0033592-f961-4066-9026-3645b09f9524-rabbitmq-plugins\") pod \"e0033592-f961-4066-9026-3645b09f9524\" (UID: \"e0033592-f961-4066-9026-3645b09f9524\") " Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.073577 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/e0033592-f961-4066-9026-3645b09f9524-rabbitmq-erlang-cookie\") pod \"e0033592-f961-4066-9026-3645b09f9524\" (UID: \"e0033592-f961-4066-9026-3645b09f9524\") " Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.073748 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/e0033592-f961-4066-9026-3645b09f9524-pod-info\") pod \"e0033592-f961-4066-9026-3645b09f9524\" (UID: \"e0033592-f961-4066-9026-3645b09f9524\") " Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.073945 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vfq8s\" (UniqueName: \"kubernetes.io/projected/01f1fcff-ebfe-4f44-9ed6-ea5308d4f2b6-kube-api-access-vfq8s\") pod \"01f1fcff-ebfe-4f44-9ed6-ea5308d4f2b6\" (UID: \"01f1fcff-ebfe-4f44-9ed6-ea5308d4f2b6\") " Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.074109 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/e0033592-f961-4066-9026-3645b09f9524-rabbitmq-confd\") pod \"e0033592-f961-4066-9026-3645b09f9524\" (UID: \"e0033592-f961-4066-9026-3645b09f9524\") " Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.074343 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/e0033592-f961-4066-9026-3645b09f9524-plugins-conf\") pod \"e0033592-f961-4066-9026-3645b09f9524\" (UID: \"e0033592-f961-4066-9026-3645b09f9524\") " Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.073878 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e0033592-f961-4066-9026-3645b09f9524-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "e0033592-f961-4066-9026-3645b09f9524" (UID: "e0033592-f961-4066-9026-3645b09f9524"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.074166 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e0033592-f961-4066-9026-3645b09f9524-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "e0033592-f961-4066-9026-3645b09f9524" (UID: "e0033592-f961-4066-9026-3645b09f9524"). InnerVolumeSpecName "rabbitmq-erlang-cookie". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.074810 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x9czh\" (UniqueName: \"kubernetes.io/projected/e0033592-f961-4066-9026-3645b09f9524-kube-api-access-x9czh\") pod \"e0033592-f961-4066-9026-3645b09f9524\" (UID: \"e0033592-f961-4066-9026-3645b09f9524\") " Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.075074 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/e0033592-f961-4066-9026-3645b09f9524-erlang-cookie-secret\") pod \"e0033592-f961-4066-9026-3645b09f9524\" (UID: \"e0033592-f961-4066-9026-3645b09f9524\") " Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.075163 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e0033592-f961-4066-9026-3645b09f9524-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "e0033592-f961-4066-9026-3645b09f9524" (UID: "e0033592-f961-4066-9026-3645b09f9524"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.075939 4753 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/e0033592-f961-4066-9026-3645b09f9524-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.076202 4753 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/e0033592-f961-4066-9026-3645b09f9524-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.076860 4753 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/e0033592-f961-4066-9026-3645b09f9524-plugins-conf\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.078666 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01f1fcff-ebfe-4f44-9ed6-ea5308d4f2b6-kube-api-access-vfq8s" (OuterVolumeSpecName: "kube-api-access-vfq8s") pod "01f1fcff-ebfe-4f44-9ed6-ea5308d4f2b6" (UID: "01f1fcff-ebfe-4f44-9ed6-ea5308d4f2b6"). InnerVolumeSpecName "kube-api-access-vfq8s". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.079400 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e0033592-f961-4066-9026-3645b09f9524-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "e0033592-f961-4066-9026-3645b09f9524" (UID: "e0033592-f961-4066-9026-3645b09f9524"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.079720 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/e0033592-f961-4066-9026-3645b09f9524-pod-info" (OuterVolumeSpecName: "pod-info") pod "e0033592-f961-4066-9026-3645b09f9524" (UID: "e0033592-f961-4066-9026-3645b09f9524"). InnerVolumeSpecName "pod-info". 
PluginName "kubernetes.io/downward-api", VolumeGidValue "" Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.081860 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e0033592-f961-4066-9026-3645b09f9524-kube-api-access-x9czh" (OuterVolumeSpecName: "kube-api-access-x9czh") pod "e0033592-f961-4066-9026-3645b09f9524" (UID: "e0033592-f961-4066-9026-3645b09f9524"). InnerVolumeSpecName "kube-api-access-x9czh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.085727 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-85f927ae-4eda-4b12-9ccc-0a8b9a462f5d" (OuterVolumeSpecName: "persistence") pod "e0033592-f961-4066-9026-3645b09f9524" (UID: "e0033592-f961-4066-9026-3645b09f9524"). InnerVolumeSpecName "pvc-85f927ae-4eda-4b12-9ccc-0a8b9a462f5d". PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.137333 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e0033592-f961-4066-9026-3645b09f9524-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "e0033592-f961-4066-9026-3645b09f9524" (UID: "e0033592-f961-4066-9026-3645b09f9524"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.178618 4753 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-85f927ae-4eda-4b12-9ccc-0a8b9a462f5d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-85f927ae-4eda-4b12-9ccc-0a8b9a462f5d\") on node \"crc\" " Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.178661 4753 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/e0033592-f961-4066-9026-3645b09f9524-pod-info\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.178674 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vfq8s\" (UniqueName: \"kubernetes.io/projected/01f1fcff-ebfe-4f44-9ed6-ea5308d4f2b6-kube-api-access-vfq8s\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.178688 4753 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/e0033592-f961-4066-9026-3645b09f9524-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.178698 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x9czh\" (UniqueName: \"kubernetes.io/projected/e0033592-f961-4066-9026-3645b09f9524-kube-api-access-x9czh\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.178707 4753 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/e0033592-f961-4066-9026-3645b09f9524-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.190374 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-f8bdff7d9-wjc8b" event={"ID":"f49f1749-fe9d-4a18-b81c-65ab628d882e","Type":"ContainerDied","Data":"4586156451ae70cb31c219357d05eb97ecab418c2110ff017dc70aee04addecc"} Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.190391 4753 util.go:48] "No 
ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-f8bdff7d9-wjc8b" Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.190529 4753 scope.go:117] "RemoveContainer" containerID="ec4fcea80b360bf897b1acf454663f063df4e89ac871de1e919e9cbb26ef50b8" Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.192307 4753 generic.go:334] "Generic (PLEG): container finished" podID="01f1fcff-ebfe-4f44-9ed6-ea5308d4f2b6" containerID="82e9d7a70f0b4833f80f4d86dd3202dbe66cf538ebb9baf0f943b314054e5c86" exitCode=0 Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.192340 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-index-g5k7d" Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.192380 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-index-g5k7d" event={"ID":"01f1fcff-ebfe-4f44-9ed6-ea5308d4f2b6","Type":"ContainerDied","Data":"82e9d7a70f0b4833f80f4d86dd3202dbe66cf538ebb9baf0f943b314054e5c86"} Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.192409 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-index-g5k7d" event={"ID":"01f1fcff-ebfe-4f44-9ed6-ea5308d4f2b6","Type":"ContainerDied","Data":"cb6741383d82137889fe7863df9f7133eca961473d0aecfd85df91ed485c3eb7"} Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.194474 4753 generic.go:334] "Generic (PLEG): container finished" podID="e0033592-f961-4066-9026-3645b09f9524" containerID="108d86555b06dd78575168c0015609068de2878c9d5b7f8b53f63cb72ed39786" exitCode=0 Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.194507 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/rabbitmq-server-0" event={"ID":"e0033592-f961-4066-9026-3645b09f9524","Type":"ContainerDied","Data":"108d86555b06dd78575168c0015609068de2878c9d5b7f8b53f63cb72ed39786"} Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.194527 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/rabbitmq-server-0" event={"ID":"e0033592-f961-4066-9026-3645b09f9524","Type":"ContainerDied","Data":"bfd2c866a6ee1d1ec8c2ef5bf56c9a201fcfc285cd8fa6564a5e96db8054ee36"} Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.194782 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/rabbitmq-server-0" Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.201726 4753 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... 
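
The run above is the kubelet volume reconciler tearing down swift-kuttl-tests/rabbitmq-server-0: each volume moves through "UnmountVolume started" (reconciler_common.go:159), "UnmountVolume.TearDown succeeded" (operation_generator.go:803), and "Volume detached" (reconciler_common.go:293), and the CSI-backed "persistence" volume gets an extra device-level pass in which csi_attacher.go skips the unstage step because the hostpath provisioner does not advertise the STAGE_UNSTAGE_VOLUME capability. A minimal sketch for recovering that per-volume lifecycle from a log like this one; the file name and the regexes are assumptions derived from the messages visible here, not part of the log:

    import re
    from collections import defaultdict

    # Assumed phase markers, copied from the kubelet messages above.
    PHASES = [
        ("unmount-started", re.compile(r'UnmountVolume started for volume \\"([^"\\]+)\\"')),
        ("teardown-ok", re.compile(r'UnmountVolume\.TearDown succeeded for volume "[^"]*" \(OuterVolumeSpecName: "([^"]+)"\)')),
        ("detached", re.compile(r'Volume detached for volume \\"([^"\\]+)\\"')),
    ]

    timeline = defaultdict(list)
    with open("kubelet.log") as fh:  # hypothetical path
        for line in fh:
            for phase, rx in PHASES:
                m = rx.search(line)
                if m:
                    timeline[m.group(1)].append(phase)

    for vol, phases in sorted(timeline.items()):
        print(f"{vol}: {' -> '.join(phases)}")

A healthy teardown shows all three phases per volume, in that order, exactly as the rabbitmq-server-0 volumes do here.
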
Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.201995 4753 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-85f927ae-4eda-4b12-9ccc-0a8b9a462f5d" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-85f927ae-4eda-4b12-9ccc-0a8b9a462f5d") on node "crc"
Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.218024 4753 scope.go:117] "RemoveContainer" containerID="82e9d7a70f0b4833f80f4d86dd3202dbe66cf538ebb9baf0f943b314054e5c86"
Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.233672 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/swift-operator-controller-manager-f8bdff7d9-wjc8b"]
Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.246097 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/swift-operator-controller-manager-f8bdff7d9-wjc8b"]
Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.253099 4753 scope.go:117] "RemoveContainer" containerID="82e9d7a70f0b4833f80f4d86dd3202dbe66cf538ebb9baf0f943b314054e5c86"
Jan 29 12:34:45 crc kubenswrapper[4753]: E0129 12:34:45.253678 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"82e9d7a70f0b4833f80f4d86dd3202dbe66cf538ebb9baf0f943b314054e5c86\": container with ID starting with 82e9d7a70f0b4833f80f4d86dd3202dbe66cf538ebb9baf0f943b314054e5c86 not found: ID does not exist" containerID="82e9d7a70f0b4833f80f4d86dd3202dbe66cf538ebb9baf0f943b314054e5c86"
Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.253743 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"82e9d7a70f0b4833f80f4d86dd3202dbe66cf538ebb9baf0f943b314054e5c86"} err="failed to get container status \"82e9d7a70f0b4833f80f4d86dd3202dbe66cf538ebb9baf0f943b314054e5c86\": rpc error: code = NotFound desc = could not find container \"82e9d7a70f0b4833f80f4d86dd3202dbe66cf538ebb9baf0f943b314054e5c86\": container with ID starting with 82e9d7a70f0b4833f80f4d86dd3202dbe66cf538ebb9baf0f943b314054e5c86 not found: ID does not exist"
Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.253773 4753 scope.go:117] "RemoveContainer" containerID="108d86555b06dd78575168c0015609068de2878c9d5b7f8b53f63cb72ed39786"
Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.256544 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/swift-operator-index-g5k7d"]
Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.269147 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/swift-operator-index-g5k7d"]
Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.275763 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/rabbitmq-server-0"]
Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.279221 4753 scope.go:117] "RemoveContainer" containerID="e1a5e4d95d6e246f97e352beda8f240200743bb7e6d02fdc3e76e9f4b225d812"
Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.279967 4753 reconciler_common.go:293] "Volume detached for volume \"pvc-85f927ae-4eda-4b12-9ccc-0a8b9a462f5d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-85f927ae-4eda-4b12-9ccc-0a8b9a462f5d\") on node \"crc\" DevicePath \"\""
Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.282749 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/rabbitmq-server-0"]
Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.298902 4753 scope.go:117] "RemoveContainer" containerID="108d86555b06dd78575168c0015609068de2878c9d5b7f8b53f63cb72ed39786"
Jan 29 12:34:45 crc kubenswrapper[4753]: E0129 12:34:45.299416 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"108d86555b06dd78575168c0015609068de2878c9d5b7f8b53f63cb72ed39786\": container with ID starting with 108d86555b06dd78575168c0015609068de2878c9d5b7f8b53f63cb72ed39786 not found: ID does not exist" containerID="108d86555b06dd78575168c0015609068de2878c9d5b7f8b53f63cb72ed39786"
Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.299460 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"108d86555b06dd78575168c0015609068de2878c9d5b7f8b53f63cb72ed39786"} err="failed to get container status \"108d86555b06dd78575168c0015609068de2878c9d5b7f8b53f63cb72ed39786\": rpc error: code = NotFound desc = could not find container \"108d86555b06dd78575168c0015609068de2878c9d5b7f8b53f63cb72ed39786\": container with ID starting with 108d86555b06dd78575168c0015609068de2878c9d5b7f8b53f63cb72ed39786 not found: ID does not exist"
Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.299492 4753 scope.go:117] "RemoveContainer" containerID="e1a5e4d95d6e246f97e352beda8f240200743bb7e6d02fdc3e76e9f4b225d812"
Jan 29 12:34:45 crc kubenswrapper[4753]: E0129 12:34:45.299876 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e1a5e4d95d6e246f97e352beda8f240200743bb7e6d02fdc3e76e9f4b225d812\": container with ID starting with e1a5e4d95d6e246f97e352beda8f240200743bb7e6d02fdc3e76e9f4b225d812 not found: ID does not exist" containerID="e1a5e4d95d6e246f97e352beda8f240200743bb7e6d02fdc3e76e9f4b225d812"
Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.299907 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e1a5e4d95d6e246f97e352beda8f240200743bb7e6d02fdc3e76e9f4b225d812"} err="failed to get container status \"e1a5e4d95d6e246f97e352beda8f240200743bb7e6d02fdc3e76e9f4b225d812\": rpc error: code = NotFound desc = could not find container \"e1a5e4d95d6e246f97e352beda8f240200743bb7e6d02fdc3e76e9f4b225d812\": container with ID starting with e1a5e4d95d6e246f97e352beda8f240200743bb7e6d02fdc3e76e9f4b225d812 not found: ID does not exist"
Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.857694 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/openstack-galera-0"
Need to start a new one" pod="swift-kuttl-tests/openstack-galera-0" Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.901478 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01f1fcff-ebfe-4f44-9ed6-ea5308d4f2b6" path="/var/lib/kubelet/pods/01f1fcff-ebfe-4f44-9ed6-ea5308d4f2b6/volumes" Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.902007 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="952f0548-3b0e-497c-98f1-b34e76762459" path="/var/lib/kubelet/pods/952f0548-3b0e-497c-98f1-b34e76762459/volumes" Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.902685 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd1f7149-df12-4c02-9585-238bfd26f12d" path="/var/lib/kubelet/pods/bd1f7149-df12-4c02-9585-238bfd26f12d/volumes" Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.903797 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e0033592-f961-4066-9026-3645b09f9524" path="/var/lib/kubelet/pods/e0033592-f961-4066-9026-3645b09f9524/volumes" Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.904472 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f49f1749-fe9d-4a18-b81c-65ab628d882e" path="/var/lib/kubelet/pods/f49f1749-fe9d-4a18-b81c-65ab628d882e/volumes" Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.990453 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qckl8\" (UniqueName: \"kubernetes.io/projected/6762f214-cfd1-4314-9fa0-1d2b40e87b4e-kube-api-access-qckl8\") pod \"6762f214-cfd1-4314-9fa0-1d2b40e87b4e\" (UID: \"6762f214-cfd1-4314-9fa0-1d2b40e87b4e\") " Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.990911 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/6762f214-cfd1-4314-9fa0-1d2b40e87b4e-config-data-generated\") pod \"6762f214-cfd1-4314-9fa0-1d2b40e87b4e\" (UID: \"6762f214-cfd1-4314-9fa0-1d2b40e87b4e\") " Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.990990 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6762f214-cfd1-4314-9fa0-1d2b40e87b4e-operator-scripts\") pod \"6762f214-cfd1-4314-9fa0-1d2b40e87b4e\" (UID: \"6762f214-cfd1-4314-9fa0-1d2b40e87b4e\") " Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.991063 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/6762f214-cfd1-4314-9fa0-1d2b40e87b4e-kolla-config\") pod \"6762f214-cfd1-4314-9fa0-1d2b40e87b4e\" (UID: \"6762f214-cfd1-4314-9fa0-1d2b40e87b4e\") " Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.991133 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/6762f214-cfd1-4314-9fa0-1d2b40e87b4e-config-data-default\") pod \"6762f214-cfd1-4314-9fa0-1d2b40e87b4e\" (UID: \"6762f214-cfd1-4314-9fa0-1d2b40e87b4e\") " Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.991173 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mysql-db\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"6762f214-cfd1-4314-9fa0-1d2b40e87b4e\" (UID: \"6762f214-cfd1-4314-9fa0-1d2b40e87b4e\") " Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.991633 4753 operation_generator.go:803] 
UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6762f214-cfd1-4314-9fa0-1d2b40e87b4e-config-data-generated" (OuterVolumeSpecName: "config-data-generated") pod "6762f214-cfd1-4314-9fa0-1d2b40e87b4e" (UID: "6762f214-cfd1-4314-9fa0-1d2b40e87b4e"). InnerVolumeSpecName "config-data-generated". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.991663 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6762f214-cfd1-4314-9fa0-1d2b40e87b4e-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "6762f214-cfd1-4314-9fa0-1d2b40e87b4e" (UID: "6762f214-cfd1-4314-9fa0-1d2b40e87b4e"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.991911 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6762f214-cfd1-4314-9fa0-1d2b40e87b4e-config-data-default" (OuterVolumeSpecName: "config-data-default") pod "6762f214-cfd1-4314-9fa0-1d2b40e87b4e" (UID: "6762f214-cfd1-4314-9fa0-1d2b40e87b4e"). InnerVolumeSpecName "config-data-default". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:34:45 crc kubenswrapper[4753]: I0129 12:34:45.992100 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6762f214-cfd1-4314-9fa0-1d2b40e87b4e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6762f214-cfd1-4314-9fa0-1d2b40e87b4e" (UID: "6762f214-cfd1-4314-9fa0-1d2b40e87b4e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:34:46 crc kubenswrapper[4753]: I0129 12:34:46.002892 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6762f214-cfd1-4314-9fa0-1d2b40e87b4e-kube-api-access-qckl8" (OuterVolumeSpecName: "kube-api-access-qckl8") pod "6762f214-cfd1-4314-9fa0-1d2b40e87b4e" (UID: "6762f214-cfd1-4314-9fa0-1d2b40e87b4e"). InnerVolumeSpecName "kube-api-access-qckl8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:34:46 crc kubenswrapper[4753]: I0129 12:34:46.017153 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "mysql-db") pod "6762f214-cfd1-4314-9fa0-1d2b40e87b4e" (UID: "6762f214-cfd1-4314-9fa0-1d2b40e87b4e"). InnerVolumeSpecName "local-storage10-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 29 12:34:46 crc kubenswrapper[4753]: I0129 12:34:46.092978 4753 reconciler_common.go:293] "Volume detached for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/6762f214-cfd1-4314-9fa0-1d2b40e87b4e-config-data-generated\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:46 crc kubenswrapper[4753]: I0129 12:34:46.093037 4753 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6762f214-cfd1-4314-9fa0-1d2b40e87b4e-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:46 crc kubenswrapper[4753]: I0129 12:34:46.093049 4753 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/6762f214-cfd1-4314-9fa0-1d2b40e87b4e-kolla-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:46 crc kubenswrapper[4753]: I0129 12:34:46.093060 4753 reconciler_common.go:293] "Volume detached for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/6762f214-cfd1-4314-9fa0-1d2b40e87b4e-config-data-default\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:46 crc kubenswrapper[4753]: I0129 12:34:46.093104 4753 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" " Jan 29 12:34:46 crc kubenswrapper[4753]: I0129 12:34:46.093115 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qckl8\" (UniqueName: \"kubernetes.io/projected/6762f214-cfd1-4314-9fa0-1d2b40e87b4e-kube-api-access-qckl8\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:46 crc kubenswrapper[4753]: I0129 12:34:46.105665 4753 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc" Jan 29 12:34:46 crc kubenswrapper[4753]: I0129 12:34:46.160111 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/openstack-galera-1" Jan 29 12:34:46 crc kubenswrapper[4753]: I0129 12:34:46.194882 4753 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:46 crc kubenswrapper[4753]: I0129 12:34:46.360898 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/89e38fc2-9f07-45f0-8cdc-b77931872d7b-operator-scripts\") pod \"89e38fc2-9f07-45f0-8cdc-b77931872d7b\" (UID: \"89e38fc2-9f07-45f0-8cdc-b77931872d7b\") " Jan 29 12:34:46 crc kubenswrapper[4753]: I0129 12:34:46.360989 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mysql-db\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"89e38fc2-9f07-45f0-8cdc-b77931872d7b\" (UID: \"89e38fc2-9f07-45f0-8cdc-b77931872d7b\") " Jan 29 12:34:46 crc kubenswrapper[4753]: I0129 12:34:46.361034 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/89e38fc2-9f07-45f0-8cdc-b77931872d7b-config-data-generated\") pod \"89e38fc2-9f07-45f0-8cdc-b77931872d7b\" (UID: \"89e38fc2-9f07-45f0-8cdc-b77931872d7b\") " Jan 29 12:34:46 crc kubenswrapper[4753]: I0129 12:34:46.361066 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6zlck\" (UniqueName: \"kubernetes.io/projected/89e38fc2-9f07-45f0-8cdc-b77931872d7b-kube-api-access-6zlck\") pod \"89e38fc2-9f07-45f0-8cdc-b77931872d7b\" (UID: \"89e38fc2-9f07-45f0-8cdc-b77931872d7b\") " Jan 29 12:34:46 crc kubenswrapper[4753]: I0129 12:34:46.361107 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/89e38fc2-9f07-45f0-8cdc-b77931872d7b-config-data-default\") pod \"89e38fc2-9f07-45f0-8cdc-b77931872d7b\" (UID: \"89e38fc2-9f07-45f0-8cdc-b77931872d7b\") " Jan 29 12:34:46 crc kubenswrapper[4753]: I0129 12:34:46.361139 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/89e38fc2-9f07-45f0-8cdc-b77931872d7b-kolla-config\") pod \"89e38fc2-9f07-45f0-8cdc-b77931872d7b\" (UID: \"89e38fc2-9f07-45f0-8cdc-b77931872d7b\") " Jan 29 12:34:46 crc kubenswrapper[4753]: I0129 12:34:46.362262 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/89e38fc2-9f07-45f0-8cdc-b77931872d7b-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "89e38fc2-9f07-45f0-8cdc-b77931872d7b" (UID: "89e38fc2-9f07-45f0-8cdc-b77931872d7b"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:34:46 crc kubenswrapper[4753]: I0129 12:34:46.363092 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/89e38fc2-9f07-45f0-8cdc-b77931872d7b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "89e38fc2-9f07-45f0-8cdc-b77931872d7b" (UID: "89e38fc2-9f07-45f0-8cdc-b77931872d7b"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:34:46 crc kubenswrapper[4753]: I0129 12:34:46.366152 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/89e38fc2-9f07-45f0-8cdc-b77931872d7b-config-data-generated" (OuterVolumeSpecName: "config-data-generated") pod "89e38fc2-9f07-45f0-8cdc-b77931872d7b" (UID: "89e38fc2-9f07-45f0-8cdc-b77931872d7b"). InnerVolumeSpecName "config-data-generated". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:34:46 crc kubenswrapper[4753]: I0129 12:34:46.366640 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/89e38fc2-9f07-45f0-8cdc-b77931872d7b-config-data-default" (OuterVolumeSpecName: "config-data-default") pod "89e38fc2-9f07-45f0-8cdc-b77931872d7b" (UID: "89e38fc2-9f07-45f0-8cdc-b77931872d7b"). InnerVolumeSpecName "config-data-default". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:34:46 crc kubenswrapper[4753]: I0129 12:34:46.374132 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/89e38fc2-9f07-45f0-8cdc-b77931872d7b-kube-api-access-6zlck" (OuterVolumeSpecName: "kube-api-access-6zlck") pod "89e38fc2-9f07-45f0-8cdc-b77931872d7b" (UID: "89e38fc2-9f07-45f0-8cdc-b77931872d7b"). InnerVolumeSpecName "kube-api-access-6zlck". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:34:46 crc kubenswrapper[4753]: I0129 12:34:46.378208 4753 generic.go:334] "Generic (PLEG): container finished" podID="6762f214-cfd1-4314-9fa0-1d2b40e87b4e" containerID="955c359e87ea643dd73d843cc3480910edf3ef440c787c82d01331b918f7cb58" exitCode=0 Jan 29 12:34:46 crc kubenswrapper[4753]: I0129 12:34:46.378302 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/openstack-galera-0" event={"ID":"6762f214-cfd1-4314-9fa0-1d2b40e87b4e","Type":"ContainerDied","Data":"955c359e87ea643dd73d843cc3480910edf3ef440c787c82d01331b918f7cb58"} Jan 29 12:34:46 crc kubenswrapper[4753]: I0129 12:34:46.378332 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/openstack-galera-0" event={"ID":"6762f214-cfd1-4314-9fa0-1d2b40e87b4e","Type":"ContainerDied","Data":"c9bd144c9fff972491689a269099cc2a7a35963bbf70b4907d949fc955bce913"} Jan 29 12:34:46 crc kubenswrapper[4753]: I0129 12:34:46.378350 4753 scope.go:117] "RemoveContainer" containerID="955c359e87ea643dd73d843cc3480910edf3ef440c787c82d01331b918f7cb58" Jan 29 12:34:46 crc kubenswrapper[4753]: I0129 12:34:46.378361 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/openstack-galera-0" Jan 29 12:34:46 crc kubenswrapper[4753]: I0129 12:34:46.384339 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "mysql-db") pod "89e38fc2-9f07-45f0-8cdc-b77931872d7b" (UID: "89e38fc2-9f07-45f0-8cdc-b77931872d7b"). InnerVolumeSpecName "local-storage01-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 29 12:34:46 crc kubenswrapper[4753]: I0129 12:34:46.401305 4753 generic.go:334] "Generic (PLEG): container finished" podID="89e38fc2-9f07-45f0-8cdc-b77931872d7b" containerID="fd2d4883cbb5e061192a0055cb839eaec1e2e1653c1b4339edbd2330756bbef4" exitCode=0 Jan 29 12:34:46 crc kubenswrapper[4753]: I0129 12:34:46.401489 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/openstack-galera-1" event={"ID":"89e38fc2-9f07-45f0-8cdc-b77931872d7b","Type":"ContainerDied","Data":"fd2d4883cbb5e061192a0055cb839eaec1e2e1653c1b4339edbd2330756bbef4"} Jan 29 12:34:46 crc kubenswrapper[4753]: I0129 12:34:46.401570 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/openstack-galera-1" Jan 29 12:34:46 crc kubenswrapper[4753]: I0129 12:34:46.401577 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/openstack-galera-1" event={"ID":"89e38fc2-9f07-45f0-8cdc-b77931872d7b","Type":"ContainerDied","Data":"776f09c496322a7720210fa16c8094598b4a27e6914b1aca53b45825a605ac4d"} Jan 29 12:34:46 crc kubenswrapper[4753]: I0129 12:34:46.418788 4753 scope.go:117] "RemoveContainer" containerID="986ff07fe034f7d5e0217ddc0a81108f405dcdf609bc954e6240eac23f6231c0" Jan 29 12:34:46 crc kubenswrapper[4753]: I0129 12:34:46.448522 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/openstack-galera-0"] Jan 29 12:34:46 crc kubenswrapper[4753]: I0129 12:34:46.451844 4753 scope.go:117] "RemoveContainer" containerID="955c359e87ea643dd73d843cc3480910edf3ef440c787c82d01331b918f7cb58" Jan 29 12:34:46 crc kubenswrapper[4753]: E0129 12:34:46.452521 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"955c359e87ea643dd73d843cc3480910edf3ef440c787c82d01331b918f7cb58\": container with ID starting with 955c359e87ea643dd73d843cc3480910edf3ef440c787c82d01331b918f7cb58 not found: ID does not exist" containerID="955c359e87ea643dd73d843cc3480910edf3ef440c787c82d01331b918f7cb58" Jan 29 12:34:46 crc kubenswrapper[4753]: I0129 12:34:46.452555 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"955c359e87ea643dd73d843cc3480910edf3ef440c787c82d01331b918f7cb58"} err="failed to get container status \"955c359e87ea643dd73d843cc3480910edf3ef440c787c82d01331b918f7cb58\": rpc error: code = NotFound desc = could not find container \"955c359e87ea643dd73d843cc3480910edf3ef440c787c82d01331b918f7cb58\": container with ID starting with 955c359e87ea643dd73d843cc3480910edf3ef440c787c82d01331b918f7cb58 not found: ID does not exist" Jan 29 12:34:46 crc kubenswrapper[4753]: I0129 12:34:46.452579 4753 scope.go:117] "RemoveContainer" containerID="986ff07fe034f7d5e0217ddc0a81108f405dcdf609bc954e6240eac23f6231c0" Jan 29 12:34:46 crc kubenswrapper[4753]: E0129 12:34:46.452829 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"986ff07fe034f7d5e0217ddc0a81108f405dcdf609bc954e6240eac23f6231c0\": container with ID starting with 986ff07fe034f7d5e0217ddc0a81108f405dcdf609bc954e6240eac23f6231c0 not found: ID does not exist" containerID="986ff07fe034f7d5e0217ddc0a81108f405dcdf609bc954e6240eac23f6231c0" Jan 29 12:34:46 crc kubenswrapper[4753]: I0129 12:34:46.452848 4753 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"986ff07fe034f7d5e0217ddc0a81108f405dcdf609bc954e6240eac23f6231c0"} err="failed to get container status \"986ff07fe034f7d5e0217ddc0a81108f405dcdf609bc954e6240eac23f6231c0\": rpc error: code = NotFound desc = could not find container \"986ff07fe034f7d5e0217ddc0a81108f405dcdf609bc954e6240eac23f6231c0\": container with ID starting with 986ff07fe034f7d5e0217ddc0a81108f405dcdf609bc954e6240eac23f6231c0 not found: ID does not exist" Jan 29 12:34:46 crc kubenswrapper[4753]: I0129 12:34:46.452859 4753 scope.go:117] "RemoveContainer" containerID="fd2d4883cbb5e061192a0055cb839eaec1e2e1653c1b4339edbd2330756bbef4" Jan 29 12:34:46 crc kubenswrapper[4753]: I0129 12:34:46.455845 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/openstack-galera-0"] Jan 29 12:34:46 crc kubenswrapper[4753]: I0129 12:34:46.462218 4753 reconciler_common.go:293] "Volume detached for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/89e38fc2-9f07-45f0-8cdc-b77931872d7b-config-data-generated\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:46 crc kubenswrapper[4753]: I0129 12:34:46.462271 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6zlck\" (UniqueName: \"kubernetes.io/projected/89e38fc2-9f07-45f0-8cdc-b77931872d7b-kube-api-access-6zlck\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:46 crc kubenswrapper[4753]: I0129 12:34:46.462283 4753 reconciler_common.go:293] "Volume detached for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/89e38fc2-9f07-45f0-8cdc-b77931872d7b-config-data-default\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:46 crc kubenswrapper[4753]: I0129 12:34:46.462297 4753 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/89e38fc2-9f07-45f0-8cdc-b77931872d7b-kolla-config\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:46 crc kubenswrapper[4753]: I0129 12:34:46.462308 4753 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/89e38fc2-9f07-45f0-8cdc-b77931872d7b-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:46 crc kubenswrapper[4753]: I0129 12:34:46.462422 4753 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" " Jan 29 12:34:46 crc kubenswrapper[4753]: I0129 12:34:46.463785 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/openstack-galera-1"] Jan 29 12:34:46 crc kubenswrapper[4753]: I0129 12:34:46.469787 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/openstack-galera-1"] Jan 29 12:34:46 crc kubenswrapper[4753]: I0129 12:34:46.471162 4753 scope.go:117] "RemoveContainer" containerID="4801ace0c26402ef4218df8ec6892a76c6d150ad4031dff6e3e9e00bf547217d" Jan 29 12:34:46 crc kubenswrapper[4753]: I0129 12:34:46.478164 4753 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: "kubernetes.io/local-volume/local-storage01-crc") on node "crc" Jan 29 12:34:46 crc kubenswrapper[4753]: I0129 12:34:46.492183 4753 scope.go:117] "RemoveContainer" containerID="fd2d4883cbb5e061192a0055cb839eaec1e2e1653c1b4339edbd2330756bbef4" Jan 29 12:34:46 crc kubenswrapper[4753]: E0129 12:34:46.492834 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"fd2d4883cbb5e061192a0055cb839eaec1e2e1653c1b4339edbd2330756bbef4\": container with ID starting with fd2d4883cbb5e061192a0055cb839eaec1e2e1653c1b4339edbd2330756bbef4 not found: ID does not exist" containerID="fd2d4883cbb5e061192a0055cb839eaec1e2e1653c1b4339edbd2330756bbef4" Jan 29 12:34:46 crc kubenswrapper[4753]: I0129 12:34:46.492910 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fd2d4883cbb5e061192a0055cb839eaec1e2e1653c1b4339edbd2330756bbef4"} err="failed to get container status \"fd2d4883cbb5e061192a0055cb839eaec1e2e1653c1b4339edbd2330756bbef4\": rpc error: code = NotFound desc = could not find container \"fd2d4883cbb5e061192a0055cb839eaec1e2e1653c1b4339edbd2330756bbef4\": container with ID starting with fd2d4883cbb5e061192a0055cb839eaec1e2e1653c1b4339edbd2330756bbef4 not found: ID does not exist" Jan 29 12:34:46 crc kubenswrapper[4753]: I0129 12:34:46.492953 4753 scope.go:117] "RemoveContainer" containerID="4801ace0c26402ef4218df8ec6892a76c6d150ad4031dff6e3e9e00bf547217d" Jan 29 12:34:46 crc kubenswrapper[4753]: E0129 12:34:46.493565 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4801ace0c26402ef4218df8ec6892a76c6d150ad4031dff6e3e9e00bf547217d\": container with ID starting with 4801ace0c26402ef4218df8ec6892a76c6d150ad4031dff6e3e9e00bf547217d not found: ID does not exist" containerID="4801ace0c26402ef4218df8ec6892a76c6d150ad4031dff6e3e9e00bf547217d" Jan 29 12:34:46 crc kubenswrapper[4753]: I0129 12:34:46.493655 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4801ace0c26402ef4218df8ec6892a76c6d150ad4031dff6e3e9e00bf547217d"} err="failed to get container status \"4801ace0c26402ef4218df8ec6892a76c6d150ad4031dff6e3e9e00bf547217d\": rpc error: code = NotFound desc = could not find container \"4801ace0c26402ef4218df8ec6892a76c6d150ad4031dff6e3e9e00bf547217d\": container with ID starting with 4801ace0c26402ef4218df8ec6892a76c6d150ad4031dff6e3e9e00bf547217d not found: ID does not exist" Jan 29 12:34:46 crc kubenswrapper[4753]: I0129 12:34:46.563485 4753 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:47 crc kubenswrapper[4753]: I0129 12:34:47.897894 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6762f214-cfd1-4314-9fa0-1d2b40e87b4e" path="/var/lib/kubelet/pods/6762f214-cfd1-4314-9fa0-1d2b40e87b4e/volumes" Jan 29 12:34:47 crc kubenswrapper[4753]: I0129 12:34:47.899655 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="89e38fc2-9f07-45f0-8cdc-b77931872d7b" path="/var/lib/kubelet/pods/89e38fc2-9f07-45f0-8cdc-b77931872d7b/volumes" Jan 29 12:34:48 crc kubenswrapper[4753]: I0129 12:34:48.603179 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/memcached-0" podUID="bd1f7149-df12-4c02-9585-238bfd26f12d" containerName="memcached" probeResult="failure" output="dial tcp 10.217.0.57:11211: i/o timeout" Jan 29 12:34:48 crc kubenswrapper[4753]: I0129 12:34:48.901163 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-696f9bf98-6q66p"] Jan 29 12:34:48 crc kubenswrapper[4753]: I0129 12:34:48.902027 4753 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack-operators/barbican-operator-controller-manager-696f9bf98-6q66p" podUID="3b46211c-99c7-44c9-8275-991e70edba9d" containerName="manager" containerID="cri-o://af77e7829ca3e4dec8874ed7b84afe03fc47c6ba2a92cf820ffeb3981b73db6a" gracePeriod=10 Jan 29 12:34:49 crc kubenswrapper[4753]: I0129 12:34:49.082640 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/barbican-operator-index-bjfgl"] Jan 29 12:34:49 crc kubenswrapper[4753]: I0129 12:34:49.082940 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/barbican-operator-index-bjfgl" podUID="c0945dd3-02c0-42cd-8a01-5ceba63d4fb7" containerName="registry-server" containerID="cri-o://a22969bbfa3ce15864f3cf3c75d2bd101d9459960abc7744f77f9aab405e2033" gracePeriod=30 Jan 29 12:34:49 crc kubenswrapper[4753]: I0129 12:34:49.148433 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/68da353acb70414e75cc7da1a6ad4f36343c2ed918d0517bd68193b1b3rcxvz"] Jan 29 12:34:49 crc kubenswrapper[4753]: I0129 12:34:49.153934 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/68da353acb70414e75cc7da1a6ad4f36343c2ed918d0517bd68193b1b3rcxvz"] Jan 29 12:34:49 crc kubenswrapper[4753]: I0129 12:34:49.486602 4753 generic.go:334] "Generic (PLEG): container finished" podID="3b46211c-99c7-44c9-8275-991e70edba9d" containerID="af77e7829ca3e4dec8874ed7b84afe03fc47c6ba2a92cf820ffeb3981b73db6a" exitCode=0 Jan 29 12:34:49 crc kubenswrapper[4753]: I0129 12:34:49.486776 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-696f9bf98-6q66p" event={"ID":"3b46211c-99c7-44c9-8275-991e70edba9d","Type":"ContainerDied","Data":"af77e7829ca3e4dec8874ed7b84afe03fc47c6ba2a92cf820ffeb3981b73db6a"} Jan 29 12:34:49 crc kubenswrapper[4753]: I0129 12:34:49.488987 4753 generic.go:334] "Generic (PLEG): container finished" podID="c0945dd3-02c0-42cd-8a01-5ceba63d4fb7" containerID="a22969bbfa3ce15864f3cf3c75d2bd101d9459960abc7744f77f9aab405e2033" exitCode=0 Jan 29 12:34:49 crc kubenswrapper[4753]: I0129 12:34:49.489044 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-index-bjfgl" event={"ID":"c0945dd3-02c0-42cd-8a01-5ceba63d4fb7","Type":"ContainerDied","Data":"a22969bbfa3ce15864f3cf3c75d2bd101d9459960abc7744f77f9aab405e2033"} Jan 29 12:34:49 crc kubenswrapper[4753]: I0129 12:34:49.542615 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-696f9bf98-6q66p" Jan 29 12:34:49 crc kubenswrapper[4753]: I0129 12:34:49.632061 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/3b46211c-99c7-44c9-8275-991e70edba9d-apiservice-cert\") pod \"3b46211c-99c7-44c9-8275-991e70edba9d\" (UID: \"3b46211c-99c7-44c9-8275-991e70edba9d\") " Jan 29 12:34:49 crc kubenswrapper[4753]: I0129 12:34:49.632417 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z54gg\" (UniqueName: \"kubernetes.io/projected/3b46211c-99c7-44c9-8275-991e70edba9d-kube-api-access-z54gg\") pod \"3b46211c-99c7-44c9-8275-991e70edba9d\" (UID: \"3b46211c-99c7-44c9-8275-991e70edba9d\") " Jan 29 12:34:49 crc kubenswrapper[4753]: I0129 12:34:49.632506 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/3b46211c-99c7-44c9-8275-991e70edba9d-webhook-cert\") pod \"3b46211c-99c7-44c9-8275-991e70edba9d\" (UID: \"3b46211c-99c7-44c9-8275-991e70edba9d\") " Jan 29 12:34:49 crc kubenswrapper[4753]: I0129 12:34:49.807625 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3b46211c-99c7-44c9-8275-991e70edba9d-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "3b46211c-99c7-44c9-8275-991e70edba9d" (UID: "3b46211c-99c7-44c9-8275-991e70edba9d"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:34:49 crc kubenswrapper[4753]: I0129 12:34:49.824142 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3b46211c-99c7-44c9-8275-991e70edba9d-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "3b46211c-99c7-44c9-8275-991e70edba9d" (UID: "3b46211c-99c7-44c9-8275-991e70edba9d"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:34:49 crc kubenswrapper[4753]: I0129 12:34:49.859483 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3b46211c-99c7-44c9-8275-991e70edba9d-kube-api-access-z54gg" (OuterVolumeSpecName: "kube-api-access-z54gg") pod "3b46211c-99c7-44c9-8275-991e70edba9d" (UID: "3b46211c-99c7-44c9-8275-991e70edba9d"). InnerVolumeSpecName "kube-api-access-z54gg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:34:49 crc kubenswrapper[4753]: I0129 12:34:49.898668 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z54gg\" (UniqueName: \"kubernetes.io/projected/3b46211c-99c7-44c9-8275-991e70edba9d-kube-api-access-z54gg\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:49 crc kubenswrapper[4753]: I0129 12:34:49.898742 4753 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/3b46211c-99c7-44c9-8275-991e70edba9d-webhook-cert\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:49 crc kubenswrapper[4753]: I0129 12:34:49.898753 4753 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/3b46211c-99c7-44c9-8275-991e70edba9d-apiservice-cert\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:49 crc kubenswrapper[4753]: I0129 12:34:49.934799 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9651a675-c52e-44a3-8f8f-2381bfbefba7" path="/var/lib/kubelet/pods/9651a675-c52e-44a3-8f8f-2381bfbefba7/volumes" Jan 29 12:34:49 crc kubenswrapper[4753]: I0129 12:34:49.942294 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-index-bjfgl" Jan 29 12:34:50 crc kubenswrapper[4753]: I0129 12:34:50.183107 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4rlkh\" (UniqueName: \"kubernetes.io/projected/c0945dd3-02c0-42cd-8a01-5ceba63d4fb7-kube-api-access-4rlkh\") pod \"c0945dd3-02c0-42cd-8a01-5ceba63d4fb7\" (UID: \"c0945dd3-02c0-42cd-8a01-5ceba63d4fb7\") " Jan 29 12:34:50 crc kubenswrapper[4753]: I0129 12:34:50.190521 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c0945dd3-02c0-42cd-8a01-5ceba63d4fb7-kube-api-access-4rlkh" (OuterVolumeSpecName: "kube-api-access-4rlkh") pod "c0945dd3-02c0-42cd-8a01-5ceba63d4fb7" (UID: "c0945dd3-02c0-42cd-8a01-5ceba63d4fb7"). InnerVolumeSpecName "kube-api-access-4rlkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:34:50 crc kubenswrapper[4753]: I0129 12:34:50.285006 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4rlkh\" (UniqueName: \"kubernetes.io/projected/c0945dd3-02c0-42cd-8a01-5ceba63d4fb7-kube-api-access-4rlkh\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:50 crc kubenswrapper[4753]: I0129 12:34:50.498991 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-696f9bf98-6q66p" event={"ID":"3b46211c-99c7-44c9-8275-991e70edba9d","Type":"ContainerDied","Data":"fcbcf922fda874c0af0c761cfc5cac92a0d0e03d6f7b1e5b69dc291a86ac9e97"} Jan 29 12:34:50 crc kubenswrapper[4753]: I0129 12:34:50.499062 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-696f9bf98-6q66p" Jan 29 12:34:50 crc kubenswrapper[4753]: I0129 12:34:50.499107 4753 scope.go:117] "RemoveContainer" containerID="af77e7829ca3e4dec8874ed7b84afe03fc47c6ba2a92cf820ffeb3981b73db6a" Jan 29 12:34:50 crc kubenswrapper[4753]: I0129 12:34:50.502652 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-index-bjfgl" event={"ID":"c0945dd3-02c0-42cd-8a01-5ceba63d4fb7","Type":"ContainerDied","Data":"12e9928da6f4b7c2a9513f4938e39e596406720d23e1e007b2005a81ebfececc"} Jan 29 12:34:50 crc kubenswrapper[4753]: I0129 12:34:50.502800 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-index-bjfgl" Jan 29 12:34:50 crc kubenswrapper[4753]: I0129 12:34:50.702163 4753 scope.go:117] "RemoveContainer" containerID="a22969bbfa3ce15864f3cf3c75d2bd101d9459960abc7744f77f9aab405e2033" Jan 29 12:34:50 crc kubenswrapper[4753]: I0129 12:34:50.708036 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-696f9bf98-6q66p"] Jan 29 12:34:50 crc kubenswrapper[4753]: I0129 12:34:50.719335 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-696f9bf98-6q66p"] Jan 29 12:34:50 crc kubenswrapper[4753]: I0129 12:34:50.724681 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/barbican-operator-index-bjfgl"] Jan 29 12:34:50 crc kubenswrapper[4753]: I0129 12:34:50.728913 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/barbican-operator-index-bjfgl"] Jan 29 12:34:52 crc kubenswrapper[4753]: I0129 12:34:52.056911 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3b46211c-99c7-44c9-8275-991e70edba9d" path="/var/lib/kubelet/pods/3b46211c-99c7-44c9-8275-991e70edba9d/volumes" Jan 29 12:34:52 crc kubenswrapper[4753]: I0129 12:34:52.057881 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c0945dd3-02c0-42cd-8a01-5ceba63d4fb7" path="/var/lib/kubelet/pods/c0945dd3-02c0-42cd-8a01-5ceba63d4fb7/volumes" Jan 29 12:34:52 crc kubenswrapper[4753]: I0129 12:34:52.100694 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-6c7b546b79-zwbp5"] Jan 29 12:34:52 crc kubenswrapper[4753]: I0129 12:34:52.100978 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/keystone-operator-controller-manager-6c7b546b79-zwbp5" podUID="d77d4839-0017-46ba-8f19-d8ddc01a1878" containerName="manager" containerID="cri-o://7122b083e3807974cb87d059398a493bae9ffc5a54e1b8b45993cf9c52e14311" gracePeriod=10 Jan 29 12:34:52 crc kubenswrapper[4753]: I0129 12:34:52.207284 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/keystone-operator-index-qzjgm"] Jan 29 12:34:52 crc kubenswrapper[4753]: I0129 12:34:52.207504 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/keystone-operator-index-qzjgm" podUID="edf5d166-7a55-4ac0-ab98-ff0f503fd9a6" containerName="registry-server" containerID="cri-o://85106337f75ed28f3f76698f58fc7ceeafba753148daf1aab371218951585671" gracePeriod=30 Jan 29 12:34:52 crc kubenswrapper[4753]: I0129 12:34:52.248117 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef5xgr2"] Jan 29 12:34:52 crc kubenswrapper[4753]: I0129 12:34:52.253866 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef5xgr2"] Jan 29 12:34:52 crc kubenswrapper[4753]: E0129 12:34:52.259862 4753 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd77d4839_0017_46ba_8f19_d8ddc01a1878.slice/crio-conmon-7122b083e3807974cb87d059398a493bae9ffc5a54e1b8b45993cf9c52e14311.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podedf5d166_7a55_4ac0_ab98_ff0f503fd9a6.slice/crio-85106337f75ed28f3f76698f58fc7ceeafba753148daf1aab371218951585671.scope\": RecentStats: unable to find data in memory cache]" Jan 29 12:34:52 crc kubenswrapper[4753]: I0129 12:34:52.553642 4753 generic.go:334] "Generic (PLEG): container finished" podID="d77d4839-0017-46ba-8f19-d8ddc01a1878" containerID="7122b083e3807974cb87d059398a493bae9ffc5a54e1b8b45993cf9c52e14311" exitCode=0 Jan 29 12:34:52 crc kubenswrapper[4753]: I0129 12:34:52.553733 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-6c7b546b79-zwbp5" event={"ID":"d77d4839-0017-46ba-8f19-d8ddc01a1878","Type":"ContainerDied","Data":"7122b083e3807974cb87d059398a493bae9ffc5a54e1b8b45993cf9c52e14311"} Jan 29 12:34:52 crc kubenswrapper[4753]: I0129 12:34:52.555627 4753 generic.go:334] "Generic (PLEG): container finished" podID="edf5d166-7a55-4ac0-ab98-ff0f503fd9a6" containerID="85106337f75ed28f3f76698f58fc7ceeafba753148daf1aab371218951585671" exitCode=0 Jan 29 12:34:52 crc kubenswrapper[4753]: I0129 12:34:52.555653 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-index-qzjgm" event={"ID":"edf5d166-7a55-4ac0-ab98-ff0f503fd9a6","Type":"ContainerDied","Data":"85106337f75ed28f3f76698f58fc7ceeafba753148daf1aab371218951585671"} Jan 29 12:34:52 crc kubenswrapper[4753]: I0129 12:34:52.623680 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-6c7b546b79-zwbp5" Jan 29 12:34:52 crc kubenswrapper[4753]: I0129 12:34:52.708500 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-index-qzjgm" Jan 29 12:34:52 crc kubenswrapper[4753]: I0129 12:34:52.747330 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t9k5t\" (UniqueName: \"kubernetes.io/projected/d77d4839-0017-46ba-8f19-d8ddc01a1878-kube-api-access-t9k5t\") pod \"d77d4839-0017-46ba-8f19-d8ddc01a1878\" (UID: \"d77d4839-0017-46ba-8f19-d8ddc01a1878\") " Jan 29 12:34:52 crc kubenswrapper[4753]: I0129 12:34:52.747535 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d77d4839-0017-46ba-8f19-d8ddc01a1878-apiservice-cert\") pod \"d77d4839-0017-46ba-8f19-d8ddc01a1878\" (UID: \"d77d4839-0017-46ba-8f19-d8ddc01a1878\") " Jan 29 12:34:52 crc kubenswrapper[4753]: I0129 12:34:52.747607 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d77d4839-0017-46ba-8f19-d8ddc01a1878-webhook-cert\") pod \"d77d4839-0017-46ba-8f19-d8ddc01a1878\" (UID: \"d77d4839-0017-46ba-8f19-d8ddc01a1878\") " Jan 29 12:34:52 crc kubenswrapper[4753]: I0129 12:34:52.753557 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d77d4839-0017-46ba-8f19-d8ddc01a1878-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "d77d4839-0017-46ba-8f19-d8ddc01a1878" (UID: "d77d4839-0017-46ba-8f19-d8ddc01a1878"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:34:52 crc kubenswrapper[4753]: I0129 12:34:52.754158 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d77d4839-0017-46ba-8f19-d8ddc01a1878-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "d77d4839-0017-46ba-8f19-d8ddc01a1878" (UID: "d77d4839-0017-46ba-8f19-d8ddc01a1878"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:34:52 crc kubenswrapper[4753]: I0129 12:34:52.760090 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d77d4839-0017-46ba-8f19-d8ddc01a1878-kube-api-access-t9k5t" (OuterVolumeSpecName: "kube-api-access-t9k5t") pod "d77d4839-0017-46ba-8f19-d8ddc01a1878" (UID: "d77d4839-0017-46ba-8f19-d8ddc01a1878"). InnerVolumeSpecName "kube-api-access-t9k5t". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:34:52 crc kubenswrapper[4753]: I0129 12:34:52.848554 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dhr6v\" (UniqueName: \"kubernetes.io/projected/edf5d166-7a55-4ac0-ab98-ff0f503fd9a6-kube-api-access-dhr6v\") pod \"edf5d166-7a55-4ac0-ab98-ff0f503fd9a6\" (UID: \"edf5d166-7a55-4ac0-ab98-ff0f503fd9a6\") " Jan 29 12:34:52 crc kubenswrapper[4753]: I0129 12:34:52.848934 4753 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d77d4839-0017-46ba-8f19-d8ddc01a1878-apiservice-cert\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:52 crc kubenswrapper[4753]: I0129 12:34:52.848953 4753 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d77d4839-0017-46ba-8f19-d8ddc01a1878-webhook-cert\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:52 crc kubenswrapper[4753]: I0129 12:34:52.848963 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t9k5t\" (UniqueName: \"kubernetes.io/projected/d77d4839-0017-46ba-8f19-d8ddc01a1878-kube-api-access-t9k5t\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:52 crc kubenswrapper[4753]: I0129 12:34:52.851806 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/edf5d166-7a55-4ac0-ab98-ff0f503fd9a6-kube-api-access-dhr6v" (OuterVolumeSpecName: "kube-api-access-dhr6v") pod "edf5d166-7a55-4ac0-ab98-ff0f503fd9a6" (UID: "edf5d166-7a55-4ac0-ab98-ff0f503fd9a6"). InnerVolumeSpecName "kube-api-access-dhr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:34:52 crc kubenswrapper[4753]: I0129 12:34:52.952687 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dhr6v\" (UniqueName: \"kubernetes.io/projected/edf5d166-7a55-4ac0-ab98-ff0f503fd9a6-kube-api-access-dhr6v\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:53 crc kubenswrapper[4753]: I0129 12:34:53.582060 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-index-qzjgm" event={"ID":"edf5d166-7a55-4ac0-ab98-ff0f503fd9a6","Type":"ContainerDied","Data":"180619a27f994f9612d9ab9a5d139e97343c3b2d10beac31d69ce5ef9bb3d33a"} Jan 29 12:34:53 crc kubenswrapper[4753]: I0129 12:34:53.582125 4753 scope.go:117] "RemoveContainer" containerID="85106337f75ed28f3f76698f58fc7ceeafba753148daf1aab371218951585671" Jan 29 12:34:53 crc kubenswrapper[4753]: I0129 12:34:53.582257 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-index-qzjgm" Jan 29 12:34:53 crc kubenswrapper[4753]: I0129 12:34:53.598537 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-6c7b546b79-zwbp5" event={"ID":"d77d4839-0017-46ba-8f19-d8ddc01a1878","Type":"ContainerDied","Data":"d3e8709eb81a390aa67b6cd40903280f57912773a97b2b770cdb7187a1ad3e95"} Jan 29 12:34:53 crc kubenswrapper[4753]: I0129 12:34:53.598670 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-6c7b546b79-zwbp5" Jan 29 12:34:53 crc kubenswrapper[4753]: I0129 12:34:53.646481 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/keystone-operator-index-qzjgm"] Jan 29 12:34:53 crc kubenswrapper[4753]: I0129 12:34:53.649766 4753 scope.go:117] "RemoveContainer" containerID="7122b083e3807974cb87d059398a493bae9ffc5a54e1b8b45993cf9c52e14311" Jan 29 12:34:53 crc kubenswrapper[4753]: I0129 12:34:53.657217 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/keystone-operator-index-qzjgm"] Jan 29 12:34:53 crc kubenswrapper[4753]: I0129 12:34:53.678618 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-6c7b546b79-zwbp5"] Jan 29 12:34:53 crc kubenswrapper[4753]: I0129 12:34:53.693431 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-6c7b546b79-zwbp5"] Jan 29 12:34:53 crc kubenswrapper[4753]: I0129 12:34:53.896093 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a3731426-3387-4e13-8aae-52f83d283335" path="/var/lib/kubelet/pods/a3731426-3387-4e13-8aae-52f83d283335/volumes" Jan 29 12:34:53 crc kubenswrapper[4753]: I0129 12:34:53.908717 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d77d4839-0017-46ba-8f19-d8ddc01a1878" path="/var/lib/kubelet/pods/d77d4839-0017-46ba-8f19-d8ddc01a1878/volumes" Jan 29 12:34:53 crc kubenswrapper[4753]: I0129 12:34:53.910087 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="edf5d166-7a55-4ac0-ab98-ff0f503fd9a6" path="/var/lib/kubelet/pods/edf5d166-7a55-4ac0-ab98-ff0f503fd9a6/volumes" Jan 29 12:34:53 crc kubenswrapper[4753]: I0129 12:34:53.967995 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-779fc9694b-gcg2x"] Jan 29 12:34:53 crc kubenswrapper[4753]: I0129 12:34:53.968257 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-gcg2x" podUID="e4d9ede8-e63a-4ba7-b3e4-057ff1298b91" containerName="operator" containerID="cri-o://6112e805c4156eb8689cf1090f692bcddcfdeef5645b3c1e402118d65b56bfa3" gracePeriod=10 Jan 29 12:34:54 crc kubenswrapper[4753]: I0129 12:34:54.361493 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-index-w4plk"] Jan 29 12:34:54 crc kubenswrapper[4753]: I0129 12:34:54.365505 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/rabbitmq-cluster-operator-index-w4plk" podUID="d629e968-732d-4000-9ad9-16dceefa3077" containerName="registry-server" containerID="cri-o://82330e92e4c837f3d22485e432e105e98fe5e3d0a05c7f6ff321880d34cef51c" gracePeriod=30 Jan 29 12:34:54 crc kubenswrapper[4753]: I0129 12:34:54.466276 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590mk8wv"] Jan 29 12:34:54 crc kubenswrapper[4753]: I0129 12:34:54.470125 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e590mk8wv"] Jan 29 12:34:54 crc kubenswrapper[4753]: I0129 12:34:54.609735 4753 generic.go:334] "Generic (PLEG): container finished" podID="d629e968-732d-4000-9ad9-16dceefa3077" 
containerID="82330e92e4c837f3d22485e432e105e98fe5e3d0a05c7f6ff321880d34cef51c" exitCode=0 Jan 29 12:34:54 crc kubenswrapper[4753]: I0129 12:34:54.609859 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-index-w4plk" event={"ID":"d629e968-732d-4000-9ad9-16dceefa3077","Type":"ContainerDied","Data":"82330e92e4c837f3d22485e432e105e98fe5e3d0a05c7f6ff321880d34cef51c"} Jan 29 12:34:54 crc kubenswrapper[4753]: I0129 12:34:54.615404 4753 generic.go:334] "Generic (PLEG): container finished" podID="e4d9ede8-e63a-4ba7-b3e4-057ff1298b91" containerID="6112e805c4156eb8689cf1090f692bcddcfdeef5645b3c1e402118d65b56bfa3" exitCode=0 Jan 29 12:34:54 crc kubenswrapper[4753]: I0129 12:34:54.615458 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-gcg2x" event={"ID":"e4d9ede8-e63a-4ba7-b3e4-057ff1298b91","Type":"ContainerDied","Data":"6112e805c4156eb8689cf1090f692bcddcfdeef5645b3c1e402118d65b56bfa3"} Jan 29 12:34:54 crc kubenswrapper[4753]: I0129 12:34:54.667506 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-gcg2x" Jan 29 12:34:54 crc kubenswrapper[4753]: I0129 12:34:54.881794 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4w4ml\" (UniqueName: \"kubernetes.io/projected/e4d9ede8-e63a-4ba7-b3e4-057ff1298b91-kube-api-access-4w4ml\") pod \"e4d9ede8-e63a-4ba7-b3e4-057ff1298b91\" (UID: \"e4d9ede8-e63a-4ba7-b3e4-057ff1298b91\") " Jan 29 12:34:54 crc kubenswrapper[4753]: I0129 12:34:54.889040 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e4d9ede8-e63a-4ba7-b3e4-057ff1298b91-kube-api-access-4w4ml" (OuterVolumeSpecName: "kube-api-access-4w4ml") pod "e4d9ede8-e63a-4ba7-b3e4-057ff1298b91" (UID: "e4d9ede8-e63a-4ba7-b3e4-057ff1298b91"). InnerVolumeSpecName "kube-api-access-4w4ml". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:34:54 crc kubenswrapper[4753]: I0129 12:34:54.942416 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-index-w4plk" Jan 29 12:34:54 crc kubenswrapper[4753]: I0129 12:34:54.986371 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4w4ml\" (UniqueName: \"kubernetes.io/projected/e4d9ede8-e63a-4ba7-b3e4-057ff1298b91-kube-api-access-4w4ml\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:55 crc kubenswrapper[4753]: I0129 12:34:55.087494 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2clp2\" (UniqueName: \"kubernetes.io/projected/d629e968-732d-4000-9ad9-16dceefa3077-kube-api-access-2clp2\") pod \"d629e968-732d-4000-9ad9-16dceefa3077\" (UID: \"d629e968-732d-4000-9ad9-16dceefa3077\") " Jan 29 12:34:55 crc kubenswrapper[4753]: I0129 12:34:55.092401 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d629e968-732d-4000-9ad9-16dceefa3077-kube-api-access-2clp2" (OuterVolumeSpecName: "kube-api-access-2clp2") pod "d629e968-732d-4000-9ad9-16dceefa3077" (UID: "d629e968-732d-4000-9ad9-16dceefa3077"). InnerVolumeSpecName "kube-api-access-2clp2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:34:55 crc kubenswrapper[4753]: I0129 12:34:55.189049 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2clp2\" (UniqueName: \"kubernetes.io/projected/d629e968-732d-4000-9ad9-16dceefa3077-kube-api-access-2clp2\") on node \"crc\" DevicePath \"\"" Jan 29 12:34:55 crc kubenswrapper[4753]: I0129 12:34:55.629385 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-gcg2x" Jan 29 12:34:55 crc kubenswrapper[4753]: I0129 12:34:55.629410 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-gcg2x" event={"ID":"e4d9ede8-e63a-4ba7-b3e4-057ff1298b91","Type":"ContainerDied","Data":"82ad557535f79864230d778ed2fdf7777d0cbdb8892d260c4860512cf07faaa8"} Jan 29 12:34:55 crc kubenswrapper[4753]: I0129 12:34:55.629528 4753 scope.go:117] "RemoveContainer" containerID="6112e805c4156eb8689cf1090f692bcddcfdeef5645b3c1e402118d65b56bfa3" Jan 29 12:34:55 crc kubenswrapper[4753]: I0129 12:34:55.632456 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-index-w4plk" event={"ID":"d629e968-732d-4000-9ad9-16dceefa3077","Type":"ContainerDied","Data":"5e7d5d189fdd47d22157692c38747b67faa0c47a4ee722ffb29a075ec326403b"} Jan 29 12:34:55 crc kubenswrapper[4753]: I0129 12:34:55.632524 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-index-w4plk" Jan 29 12:34:55 crc kubenswrapper[4753]: I0129 12:34:55.655432 4753 scope.go:117] "RemoveContainer" containerID="82330e92e4c837f3d22485e432e105e98fe5e3d0a05c7f6ff321880d34cef51c" Jan 29 12:34:55 crc kubenswrapper[4753]: I0129 12:34:55.668666 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-779fc9694b-gcg2x"] Jan 29 12:34:55 crc kubenswrapper[4753]: I0129 12:34:55.678115 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-779fc9694b-gcg2x"] Jan 29 12:34:55 crc kubenswrapper[4753]: I0129 12:34:55.682681 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-index-w4plk"] Jan 29 12:34:55 crc kubenswrapper[4753]: I0129 12:34:55.690923 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-index-w4plk"] Jan 29 12:34:55 crc kubenswrapper[4753]: I0129 12:34:55.889403 4753 scope.go:117] "RemoveContainer" containerID="00211d2562b6632acb04decda9045cbb8d3216adca69cd5bc1380ec42b8211e0" Jan 29 12:34:55 crc kubenswrapper[4753]: E0129 12:34:55.889727 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7c24x_openshift-machine-config-operator(b0310995-a7c7-47c3-ae6c-05daaaba92a6)\"" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" Jan 29 12:34:55 crc kubenswrapper[4753]: I0129 12:34:55.897385 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ae1b286-c72f-4430-8504-1144b7265ebb" path="/var/lib/kubelet/pods/6ae1b286-c72f-4430-8504-1144b7265ebb/volumes" Jan 29 12:34:55 crc kubenswrapper[4753]: I0129 12:34:55.898381 4753 kubelet_volumes.go:163] "Cleaned up 
orphaned pod volumes dir" podUID="d629e968-732d-4000-9ad9-16dceefa3077" path="/var/lib/kubelet/pods/d629e968-732d-4000-9ad9-16dceefa3077/volumes" Jan 29 12:34:55 crc kubenswrapper[4753]: I0129 12:34:55.898888 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e4d9ede8-e63a-4ba7-b3e4-057ff1298b91" path="/var/lib/kubelet/pods/e4d9ede8-e63a-4ba7-b3e4-057ff1298b91/volumes" Jan 29 12:34:59 crc kubenswrapper[4753]: I0129 12:34:59.284098 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/infra-operator-controller-manager-79c7d496c4-zf52n"] Jan 29 12:34:59 crc kubenswrapper[4753]: I0129 12:34:59.284721 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/infra-operator-controller-manager-79c7d496c4-zf52n" podUID="6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf" containerName="manager" containerID="cri-o://6ec9bf2fa311e9e2dea6efa58eb61abec5053750793a56fd5c499c2744674742" gracePeriod=10 Jan 29 12:34:59 crc kubenswrapper[4753]: I0129 12:34:59.490061 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/infra-operator-index-c6nzn"] Jan 29 12:34:59 crc kubenswrapper[4753]: I0129 12:34:59.490355 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/infra-operator-index-c6nzn" podUID="c73421ec-f2e6-4378-b4b5-6fcdcbee082b" containerName="registry-server" containerID="cri-o://1b59d0f2993ed03888c34a2e23832be3e53542a2eacb5e5294a37745a8f10697" gracePeriod=30 Jan 29 12:34:59 crc kubenswrapper[4753]: I0129 12:34:59.716272 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576q8z85"] Jan 29 12:34:59 crc kubenswrapper[4753]: I0129 12:34:59.716682 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576q8z85"] Jan 29 12:34:59 crc kubenswrapper[4753]: I0129 12:34:59.738339 4753 generic.go:334] "Generic (PLEG): container finished" podID="6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf" containerID="6ec9bf2fa311e9e2dea6efa58eb61abec5053750793a56fd5c499c2744674742" exitCode=0 Jan 29 12:34:59 crc kubenswrapper[4753]: I0129 12:34:59.738399 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-79c7d496c4-zf52n" event={"ID":"6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf","Type":"ContainerDied","Data":"6ec9bf2fa311e9e2dea6efa58eb61abec5053750793a56fd5c499c2744674742"} Jan 29 12:34:59 crc kubenswrapper[4753]: I0129 12:34:59.897423 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="00777d2a-950c-42f8-a8e2-9df4cbab7add" path="/var/lib/kubelet/pods/00777d2a-950c-42f8-a8e2-9df4cbab7add/volumes" Jan 29 12:35:00 crc kubenswrapper[4753]: I0129 12:35:00.814825 4753 generic.go:334] "Generic (PLEG): container finished" podID="c73421ec-f2e6-4378-b4b5-6fcdcbee082b" containerID="1b59d0f2993ed03888c34a2e23832be3e53542a2eacb5e5294a37745a8f10697" exitCode=0 Jan 29 12:35:00 crc kubenswrapper[4753]: I0129 12:35:00.815317 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-index-c6nzn" event={"ID":"c73421ec-f2e6-4378-b4b5-6fcdcbee082b","Type":"ContainerDied","Data":"1b59d0f2993ed03888c34a2e23832be3e53542a2eacb5e5294a37745a8f10697"} Jan 29 12:35:00 crc kubenswrapper[4753]: I0129 12:35:00.815375 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-index-c6nzn" 
event={"ID":"c73421ec-f2e6-4378-b4b5-6fcdcbee082b","Type":"ContainerDied","Data":"2915cfbdf994521b20df8f404382af41fd059ba8e61c75d612e531bd044f2b0a"} Jan 29 12:35:00 crc kubenswrapper[4753]: I0129 12:35:00.815413 4753 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2915cfbdf994521b20df8f404382af41fd059ba8e61c75d612e531bd044f2b0a" Jan 29 12:35:00 crc kubenswrapper[4753]: I0129 12:35:00.902536 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-index-c6nzn" Jan 29 12:35:00 crc kubenswrapper[4753]: I0129 12:35:00.903077 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-8d6c56984-hp786"] Jan 29 12:35:00 crc kubenswrapper[4753]: I0129 12:35:00.903322 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/mariadb-operator-controller-manager-8d6c56984-hp786" podUID="0a802cf6-528b-4923-baa7-a3eb25bf9bad" containerName="manager" containerID="cri-o://f0979a0f20cf1dd20af8d2287b74ef55439e85192334188602ab9c867f565bc1" gracePeriod=10 Jan 29 12:35:01 crc kubenswrapper[4753]: I0129 12:35:01.021208 4753 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/mariadb-operator-controller-manager-8d6c56984-hp786" podUID="0a802cf6-528b-4923-baa7-a3eb25bf9bad" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.48:8081/readyz\": dial tcp 10.217.0.48:8081: connect: connection refused" Jan 29 12:35:01 crc kubenswrapper[4753]: I0129 12:35:01.039437 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-79c7d496c4-zf52n" Jan 29 12:35:01 crc kubenswrapper[4753]: I0129 12:35:01.086017 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bls8b\" (UniqueName: \"kubernetes.io/projected/c73421ec-f2e6-4378-b4b5-6fcdcbee082b-kube-api-access-bls8b\") pod \"c73421ec-f2e6-4378-b4b5-6fcdcbee082b\" (UID: \"c73421ec-f2e6-4378-b4b5-6fcdcbee082b\") " Jan 29 12:35:01 crc kubenswrapper[4753]: I0129 12:35:01.196222 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mmkwk\" (UniqueName: \"kubernetes.io/projected/6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf-kube-api-access-mmkwk\") pod \"6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf\" (UID: \"6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf\") " Jan 29 12:35:01 crc kubenswrapper[4753]: I0129 12:35:01.196294 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf-webhook-cert\") pod \"6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf\" (UID: \"6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf\") " Jan 29 12:35:01 crc kubenswrapper[4753]: I0129 12:35:01.208010 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf-kube-api-access-mmkwk" (OuterVolumeSpecName: "kube-api-access-mmkwk") pod "6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf" (UID: "6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf"). InnerVolumeSpecName "kube-api-access-mmkwk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:35:01 crc kubenswrapper[4753]: I0129 12:35:01.208127 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c73421ec-f2e6-4378-b4b5-6fcdcbee082b-kube-api-access-bls8b" (OuterVolumeSpecName: "kube-api-access-bls8b") pod "c73421ec-f2e6-4378-b4b5-6fcdcbee082b" (UID: "c73421ec-f2e6-4378-b4b5-6fcdcbee082b"). InnerVolumeSpecName "kube-api-access-bls8b". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:35:01 crc kubenswrapper[4753]: I0129 12:35:01.217007 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/mariadb-operator-index-kdzwc"] Jan 29 12:35:01 crc kubenswrapper[4753]: I0129 12:35:01.217335 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/mariadb-operator-index-kdzwc" podUID="6a2cd3a2-57bb-4d30-8d10-666c40a8908d" containerName="registry-server" containerID="cri-o://4daf2ffe545f5f8e7d9335a1f289bfed6cf75dff6ba525ff19fd0e2c6015d34b" gracePeriod=30 Jan 29 12:35:01 crc kubenswrapper[4753]: I0129 12:35:01.221679 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf" (UID: "6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:35:01 crc kubenswrapper[4753]: I0129 12:35:01.248731 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f405mwqg"] Jan 29 12:35:01 crc kubenswrapper[4753]: I0129 12:35:01.265484 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f405mwqg"] Jan 29 12:35:01 crc kubenswrapper[4753]: I0129 12:35:01.297117 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf-apiservice-cert\") pod \"6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf\" (UID: \"6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf\") " Jan 29 12:35:01 crc kubenswrapper[4753]: I0129 12:35:01.297608 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mmkwk\" (UniqueName: \"kubernetes.io/projected/6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf-kube-api-access-mmkwk\") on node \"crc\" DevicePath \"\"" Jan 29 12:35:01 crc kubenswrapper[4753]: I0129 12:35:01.297636 4753 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf-webhook-cert\") on node \"crc\" DevicePath \"\"" Jan 29 12:35:01 crc kubenswrapper[4753]: I0129 12:35:01.297649 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bls8b\" (UniqueName: \"kubernetes.io/projected/c73421ec-f2e6-4378-b4b5-6fcdcbee082b-kube-api-access-bls8b\") on node \"crc\" DevicePath \"\"" Jan 29 12:35:01 crc kubenswrapper[4753]: I0129 12:35:01.303370 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf" (UID: "6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf"). InnerVolumeSpecName "apiservice-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:35:01 crc kubenswrapper[4753]: I0129 12:35:01.398670 4753 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf-apiservice-cert\") on node \"crc\" DevicePath \"\"" Jan 29 12:35:01 crc kubenswrapper[4753]: I0129 12:35:01.603899 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-index-kdzwc" Jan 29 12:35:01 crc kubenswrapper[4753]: I0129 12:35:01.702973 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qwqcj\" (UniqueName: \"kubernetes.io/projected/6a2cd3a2-57bb-4d30-8d10-666c40a8908d-kube-api-access-qwqcj\") pod \"6a2cd3a2-57bb-4d30-8d10-666c40a8908d\" (UID: \"6a2cd3a2-57bb-4d30-8d10-666c40a8908d\") " Jan 29 12:35:01 crc kubenswrapper[4753]: I0129 12:35:01.708520 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6a2cd3a2-57bb-4d30-8d10-666c40a8908d-kube-api-access-qwqcj" (OuterVolumeSpecName: "kube-api-access-qwqcj") pod "6a2cd3a2-57bb-4d30-8d10-666c40a8908d" (UID: "6a2cd3a2-57bb-4d30-8d10-666c40a8908d"). InnerVolumeSpecName "kube-api-access-qwqcj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:35:01 crc kubenswrapper[4753]: I0129 12:35:01.804160 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qwqcj\" (UniqueName: \"kubernetes.io/projected/6a2cd3a2-57bb-4d30-8d10-666c40a8908d-kube-api-access-qwqcj\") on node \"crc\" DevicePath \"\"" Jan 29 12:35:01 crc kubenswrapper[4753]: I0129 12:35:01.828116 4753 generic.go:334] "Generic (PLEG): container finished" podID="6a2cd3a2-57bb-4d30-8d10-666c40a8908d" containerID="4daf2ffe545f5f8e7d9335a1f289bfed6cf75dff6ba525ff19fd0e2c6015d34b" exitCode=0 Jan 29 12:35:01 crc kubenswrapper[4753]: I0129 12:35:01.828209 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-index-kdzwc" Jan 29 12:35:01 crc kubenswrapper[4753]: I0129 12:35:01.828202 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-index-kdzwc" event={"ID":"6a2cd3a2-57bb-4d30-8d10-666c40a8908d","Type":"ContainerDied","Data":"4daf2ffe545f5f8e7d9335a1f289bfed6cf75dff6ba525ff19fd0e2c6015d34b"} Jan 29 12:35:01 crc kubenswrapper[4753]: I0129 12:35:01.828299 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-index-kdzwc" event={"ID":"6a2cd3a2-57bb-4d30-8d10-666c40a8908d","Type":"ContainerDied","Data":"df8d55fd68a861c4f7bbe554d9dbc06390c42dbcee96570ae7c9e27d36859f75"} Jan 29 12:35:01 crc kubenswrapper[4753]: I0129 12:35:01.828331 4753 scope.go:117] "RemoveContainer" containerID="4daf2ffe545f5f8e7d9335a1f289bfed6cf75dff6ba525ff19fd0e2c6015d34b" Jan 29 12:35:01 crc kubenswrapper[4753]: I0129 12:35:01.836009 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-79c7d496c4-zf52n" Jan 29 12:35:01 crc kubenswrapper[4753]: I0129 12:35:01.836010 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-79c7d496c4-zf52n" event={"ID":"6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf","Type":"ContainerDied","Data":"346c6fa4ffb84d933401cb18c1fd1bc5863f538cb25b7def3ec1ae7792f3afe0"} Jan 29 12:35:01 crc kubenswrapper[4753]: I0129 12:35:01.838664 4753 generic.go:334] "Generic (PLEG): container finished" podID="0a802cf6-528b-4923-baa7-a3eb25bf9bad" containerID="f0979a0f20cf1dd20af8d2287b74ef55439e85192334188602ab9c867f565bc1" exitCode=0 Jan 29 12:35:01 crc kubenswrapper[4753]: I0129 12:35:01.838740 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-index-c6nzn" Jan 29 12:35:01 crc kubenswrapper[4753]: I0129 12:35:01.839934 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-8d6c56984-hp786" event={"ID":"0a802cf6-528b-4923-baa7-a3eb25bf9bad","Type":"ContainerDied","Data":"f0979a0f20cf1dd20af8d2287b74ef55439e85192334188602ab9c867f565bc1"} Jan 29 12:35:01 crc kubenswrapper[4753]: I0129 12:35:01.839967 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-8d6c56984-hp786" event={"ID":"0a802cf6-528b-4923-baa7-a3eb25bf9bad","Type":"ContainerDied","Data":"452cf0440cebcc3f572a85848234b39d64887d24c912979521258cc0c0233867"} Jan 29 12:35:01 crc kubenswrapper[4753]: I0129 12:35:01.839980 4753 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="452cf0440cebcc3f572a85848234b39d64887d24c912979521258cc0c0233867" Jan 29 12:35:01 crc kubenswrapper[4753]: I0129 12:35:01.859937 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-8d6c56984-hp786" Jan 29 12:35:01 crc kubenswrapper[4753]: I0129 12:35:01.866828 4753 scope.go:117] "RemoveContainer" containerID="4daf2ffe545f5f8e7d9335a1f289bfed6cf75dff6ba525ff19fd0e2c6015d34b" Jan 29 12:35:01 crc kubenswrapper[4753]: E0129 12:35:01.867633 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4daf2ffe545f5f8e7d9335a1f289bfed6cf75dff6ba525ff19fd0e2c6015d34b\": container with ID starting with 4daf2ffe545f5f8e7d9335a1f289bfed6cf75dff6ba525ff19fd0e2c6015d34b not found: ID does not exist" containerID="4daf2ffe545f5f8e7d9335a1f289bfed6cf75dff6ba525ff19fd0e2c6015d34b" Jan 29 12:35:01 crc kubenswrapper[4753]: I0129 12:35:01.867671 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4daf2ffe545f5f8e7d9335a1f289bfed6cf75dff6ba525ff19fd0e2c6015d34b"} err="failed to get container status \"4daf2ffe545f5f8e7d9335a1f289bfed6cf75dff6ba525ff19fd0e2c6015d34b\": rpc error: code = NotFound desc = could not find container \"4daf2ffe545f5f8e7d9335a1f289bfed6cf75dff6ba525ff19fd0e2c6015d34b\": container with ID starting with 4daf2ffe545f5f8e7d9335a1f289bfed6cf75dff6ba525ff19fd0e2c6015d34b not found: ID does not exist" Jan 29 12:35:01 crc kubenswrapper[4753]: I0129 12:35:01.867710 4753 scope.go:117] "RemoveContainer" containerID="6ec9bf2fa311e9e2dea6efa58eb61abec5053750793a56fd5c499c2744674742" Jan 29 12:35:01 crc kubenswrapper[4753]: I0129 12:35:01.920109 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e0312663-6b29-4d51-a91d-7e2caf2ef12d" path="/var/lib/kubelet/pods/e0312663-6b29-4d51-a91d-7e2caf2ef12d/volumes" Jan 29 12:35:01 crc kubenswrapper[4753]: I0129 12:35:01.927938 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/infra-operator-controller-manager-79c7d496c4-zf52n"] Jan 29 12:35:01 crc kubenswrapper[4753]: I0129 12:35:01.927982 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/infra-operator-controller-manager-79c7d496c4-zf52n"] Jan 29 12:35:01 crc kubenswrapper[4753]: I0129 12:35:01.928011 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/mariadb-operator-index-kdzwc"] Jan 29 12:35:01 crc kubenswrapper[4753]: I0129 12:35:01.939464 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/mariadb-operator-index-kdzwc"] Jan 29 12:35:01 crc kubenswrapper[4753]: I0129 12:35:01.945204 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/infra-operator-index-c6nzn"] Jan 29 12:35:01 crc kubenswrapper[4753]: I0129 12:35:01.949037 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/infra-operator-index-c6nzn"] Jan 29 12:35:02 crc kubenswrapper[4753]: I0129 12:35:02.006901 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/0a802cf6-528b-4923-baa7-a3eb25bf9bad-apiservice-cert\") pod \"0a802cf6-528b-4923-baa7-a3eb25bf9bad\" (UID: \"0a802cf6-528b-4923-baa7-a3eb25bf9bad\") " Jan 29 12:35:02 crc kubenswrapper[4753]: I0129 12:35:02.006969 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/0a802cf6-528b-4923-baa7-a3eb25bf9bad-webhook-cert\") pod \"0a802cf6-528b-4923-baa7-a3eb25bf9bad\" (UID: \"0a802cf6-528b-4923-baa7-a3eb25bf9bad\") " 
Jan 29 12:35:02 crc kubenswrapper[4753]: I0129 12:35:02.007070 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g9dsc\" (UniqueName: \"kubernetes.io/projected/0a802cf6-528b-4923-baa7-a3eb25bf9bad-kube-api-access-g9dsc\") pod \"0a802cf6-528b-4923-baa7-a3eb25bf9bad\" (UID: \"0a802cf6-528b-4923-baa7-a3eb25bf9bad\") " Jan 29 12:35:02 crc kubenswrapper[4753]: I0129 12:35:02.010431 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a802cf6-528b-4923-baa7-a3eb25bf9bad-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "0a802cf6-528b-4923-baa7-a3eb25bf9bad" (UID: "0a802cf6-528b-4923-baa7-a3eb25bf9bad"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:35:02 crc kubenswrapper[4753]: I0129 12:35:02.011115 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a802cf6-528b-4923-baa7-a3eb25bf9bad-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "0a802cf6-528b-4923-baa7-a3eb25bf9bad" (UID: "0a802cf6-528b-4923-baa7-a3eb25bf9bad"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:35:02 crc kubenswrapper[4753]: I0129 12:35:02.011350 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a802cf6-528b-4923-baa7-a3eb25bf9bad-kube-api-access-g9dsc" (OuterVolumeSpecName: "kube-api-access-g9dsc") pod "0a802cf6-528b-4923-baa7-a3eb25bf9bad" (UID: "0a802cf6-528b-4923-baa7-a3eb25bf9bad"). InnerVolumeSpecName "kube-api-access-g9dsc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:35:02 crc kubenswrapper[4753]: I0129 12:35:02.108112 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g9dsc\" (UniqueName: \"kubernetes.io/projected/0a802cf6-528b-4923-baa7-a3eb25bf9bad-kube-api-access-g9dsc\") on node \"crc\" DevicePath \"\"" Jan 29 12:35:02 crc kubenswrapper[4753]: I0129 12:35:02.108159 4753 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/0a802cf6-528b-4923-baa7-a3eb25bf9bad-apiservice-cert\") on node \"crc\" DevicePath \"\"" Jan 29 12:35:02 crc kubenswrapper[4753]: I0129 12:35:02.108170 4753 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/0a802cf6-528b-4923-baa7-a3eb25bf9bad-webhook-cert\") on node \"crc\" DevicePath \"\"" Jan 29 12:35:02 crc kubenswrapper[4753]: I0129 12:35:02.852024 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-8d6c56984-hp786" Jan 29 12:35:02 crc kubenswrapper[4753]: I0129 12:35:02.884529 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-8d6c56984-hp786"] Jan 29 12:35:02 crc kubenswrapper[4753]: I0129 12:35:02.891060 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-8d6c56984-hp786"] Jan 29 12:35:03 crc kubenswrapper[4753]: I0129 12:35:03.897010 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0a802cf6-528b-4923-baa7-a3eb25bf9bad" path="/var/lib/kubelet/pods/0a802cf6-528b-4923-baa7-a3eb25bf9bad/volumes" Jan 29 12:35:03 crc kubenswrapper[4753]: I0129 12:35:03.897604 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6a2cd3a2-57bb-4d30-8d10-666c40a8908d" path="/var/lib/kubelet/pods/6a2cd3a2-57bb-4d30-8d10-666c40a8908d/volumes" Jan 29 12:35:03 crc kubenswrapper[4753]: I0129 12:35:03.898160 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf" path="/var/lib/kubelet/pods/6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf/volumes" Jan 29 12:35:03 crc kubenswrapper[4753]: I0129 12:35:03.898719 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c73421ec-f2e6-4378-b4b5-6fcdcbee082b" path="/var/lib/kubelet/pods/c73421ec-f2e6-4378-b4b5-6fcdcbee082b/volumes" Jan 29 12:35:11 crc kubenswrapper[4753]: I0129 12:35:11.180657 4753 scope.go:117] "RemoveContainer" containerID="00211d2562b6632acb04decda9045cbb8d3216adca69cd5bc1380ec42b8211e0" Jan 29 12:35:11 crc kubenswrapper[4753]: E0129 12:35:11.181723 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7c24x_openshift-machine-config-operator(b0310995-a7c7-47c3-ae6c-05daaaba92a6)\"" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" Jan 29 12:35:12 crc kubenswrapper[4753]: I0129 12:35:12.351062 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/keystonedfb1-account-delete-t7d55" Jan 29 12:35:12 crc kubenswrapper[4753]: I0129 12:35:12.401596 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tfd72\" (UniqueName: \"kubernetes.io/projected/74538c68-72a6-44a4-a906-3cf0a864a392-kube-api-access-tfd72\") pod \"74538c68-72a6-44a4-a906-3cf0a864a392\" (UID: \"74538c68-72a6-44a4-a906-3cf0a864a392\") " Jan 29 12:35:12 crc kubenswrapper[4753]: I0129 12:35:12.401736 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/74538c68-72a6-44a4-a906-3cf0a864a392-operator-scripts\") pod \"74538c68-72a6-44a4-a906-3cf0a864a392\" (UID: \"74538c68-72a6-44a4-a906-3cf0a864a392\") " Jan 29 12:35:12 crc kubenswrapper[4753]: I0129 12:35:12.402527 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/74538c68-72a6-44a4-a906-3cf0a864a392-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "74538c68-72a6-44a4-a906-3cf0a864a392" (UID: "74538c68-72a6-44a4-a906-3cf0a864a392"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:35:12 crc kubenswrapper[4753]: I0129 12:35:12.407579 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/74538c68-72a6-44a4-a906-3cf0a864a392-kube-api-access-tfd72" (OuterVolumeSpecName: "kube-api-access-tfd72") pod "74538c68-72a6-44a4-a906-3cf0a864a392" (UID: "74538c68-72a6-44a4-a906-3cf0a864a392"). InnerVolumeSpecName "kube-api-access-tfd72". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:35:12 crc kubenswrapper[4753]: I0129 12:35:12.467560 4753 generic.go:334] "Generic (PLEG): container finished" podID="74538c68-72a6-44a4-a906-3cf0a864a392" containerID="f01bf5128c8e96d8ceb312c4f1b44ad410f48f5c54623eead170d31ce0311c5a" exitCode=137 Jan 29 12:35:12 crc kubenswrapper[4753]: I0129 12:35:12.467614 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/keystonedfb1-account-delete-t7d55" Jan 29 12:35:12 crc kubenswrapper[4753]: I0129 12:35:12.467639 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/keystonedfb1-account-delete-t7d55" event={"ID":"74538c68-72a6-44a4-a906-3cf0a864a392","Type":"ContainerDied","Data":"f01bf5128c8e96d8ceb312c4f1b44ad410f48f5c54623eead170d31ce0311c5a"} Jan 29 12:35:12 crc kubenswrapper[4753]: I0129 12:35:12.467686 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/keystonedfb1-account-delete-t7d55" event={"ID":"74538c68-72a6-44a4-a906-3cf0a864a392","Type":"ContainerDied","Data":"b1b3638c69b4bd5a3d916cc5dc4822fc6155c8882f6b4b057bb06541087b3cae"} Jan 29 12:35:12 crc kubenswrapper[4753]: I0129 12:35:12.467707 4753 scope.go:117] "RemoveContainer" containerID="f01bf5128c8e96d8ceb312c4f1b44ad410f48f5c54623eead170d31ce0311c5a" Jan 29 12:35:12 crc kubenswrapper[4753]: I0129 12:35:12.503465 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tfd72\" (UniqueName: \"kubernetes.io/projected/74538c68-72a6-44a4-a906-3cf0a864a392-kube-api-access-tfd72\") on node \"crc\" DevicePath \"\"" Jan 29 12:35:12 crc kubenswrapper[4753]: I0129 12:35:12.503670 4753 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/74538c68-72a6-44a4-a906-3cf0a864a392-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 12:35:12 crc kubenswrapper[4753]: I0129 12:35:12.503845 4753 scope.go:117] "RemoveContainer" containerID="f01bf5128c8e96d8ceb312c4f1b44ad410f48f5c54623eead170d31ce0311c5a" Jan 29 12:35:12 crc kubenswrapper[4753]: E0129 12:35:12.506358 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f01bf5128c8e96d8ceb312c4f1b44ad410f48f5c54623eead170d31ce0311c5a\": container with ID starting with f01bf5128c8e96d8ceb312c4f1b44ad410f48f5c54623eead170d31ce0311c5a not found: ID does not exist" containerID="f01bf5128c8e96d8ceb312c4f1b44ad410f48f5c54623eead170d31ce0311c5a" Jan 29 12:35:12 crc kubenswrapper[4753]: I0129 12:35:12.506404 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f01bf5128c8e96d8ceb312c4f1b44ad410f48f5c54623eead170d31ce0311c5a"} err="failed to get container status \"f01bf5128c8e96d8ceb312c4f1b44ad410f48f5c54623eead170d31ce0311c5a\": rpc error: code = NotFound desc = could not find container \"f01bf5128c8e96d8ceb312c4f1b44ad410f48f5c54623eead170d31ce0311c5a\": container with ID starting with 
f01bf5128c8e96d8ceb312c4f1b44ad410f48f5c54623eead170d31ce0311c5a not found: ID does not exist" Jan 29 12:35:12 crc kubenswrapper[4753]: I0129 12:35:12.516802 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/keystonedfb1-account-delete-t7d55"] Jan 29 12:35:12 crc kubenswrapper[4753]: I0129 12:35:12.522848 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/keystonedfb1-account-delete-t7d55"] Jan 29 12:35:12 crc kubenswrapper[4753]: E0129 12:35:12.566434 4753 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod74538c68_72a6_44a4_a906_3cf0a864a392.slice\": RecentStats: unable to find data in memory cache]" Jan 29 12:35:13 crc kubenswrapper[4753]: I0129 12:35:13.896340 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="74538c68-72a6-44a4-a906-3cf0a864a392" path="/var/lib/kubelet/pods/74538c68-72a6-44a4-a906-3cf0a864a392/volumes" Jan 29 12:35:14 crc kubenswrapper[4753]: I0129 12:35:14.413976 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-mwvhx/must-gather-bb7qn"] Jan 29 12:35:14 crc kubenswrapper[4753]: E0129 12:35:14.414725 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="edf5d166-7a55-4ac0-ab98-ff0f503fd9a6" containerName="registry-server" Jan 29 12:35:14 crc kubenswrapper[4753]: I0129 12:35:14.414768 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="edf5d166-7a55-4ac0-ab98-ff0f503fd9a6" containerName="registry-server" Jan 29 12:35:14 crc kubenswrapper[4753]: E0129 12:35:14.414794 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0033592-f961-4066-9026-3645b09f9524" containerName="rabbitmq" Jan 29 12:35:14 crc kubenswrapper[4753]: I0129 12:35:14.414802 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0033592-f961-4066-9026-3645b09f9524" containerName="rabbitmq" Jan 29 12:35:14 crc kubenswrapper[4753]: E0129 12:35:14.414818 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a2cd3a2-57bb-4d30-8d10-666c40a8908d" containerName="registry-server" Jan 29 12:35:14 crc kubenswrapper[4753]: I0129 12:35:14.414827 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a2cd3a2-57bb-4d30-8d10-666c40a8908d" containerName="registry-server" Jan 29 12:35:14 crc kubenswrapper[4753]: E0129 12:35:14.414843 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc8c9706-0165-4cc6-ad22-bc70f03d5bf9" containerName="mysql-bootstrap" Jan 29 12:35:14 crc kubenswrapper[4753]: I0129 12:35:14.414850 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc8c9706-0165-4cc6-ad22-bc70f03d5bf9" containerName="mysql-bootstrap" Jan 29 12:35:14 crc kubenswrapper[4753]: E0129 12:35:14.414864 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e4d9ede8-e63a-4ba7-b3e4-057ff1298b91" containerName="operator" Jan 29 12:35:14 crc kubenswrapper[4753]: I0129 12:35:14.414874 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="e4d9ede8-e63a-4ba7-b3e4-057ff1298b91" containerName="operator" Jan 29 12:35:14 crc kubenswrapper[4753]: E0129 12:35:14.414882 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0033592-f961-4066-9026-3645b09f9524" containerName="setup-container" Jan 29 12:35:14 crc kubenswrapper[4753]: I0129 12:35:14.414889 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0033592-f961-4066-9026-3645b09f9524" 
containerName="setup-container" Jan 29 12:35:14 crc kubenswrapper[4753]: E0129 12:35:14.414902 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd1f7149-df12-4c02-9585-238bfd26f12d" containerName="memcached" Jan 29 12:35:14 crc kubenswrapper[4753]: I0129 12:35:14.414909 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd1f7149-df12-4c02-9585-238bfd26f12d" containerName="memcached" Jan 29 12:35:14 crc kubenswrapper[4753]: E0129 12:35:14.414919 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d629e968-732d-4000-9ad9-16dceefa3077" containerName="registry-server" Jan 29 12:35:14 crc kubenswrapper[4753]: I0129 12:35:14.414926 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="d629e968-732d-4000-9ad9-16dceefa3077" containerName="registry-server" Jan 29 12:35:14 crc kubenswrapper[4753]: E0129 12:35:14.414939 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f49f1749-fe9d-4a18-b81c-65ab628d882e" containerName="manager" Jan 29 12:35:14 crc kubenswrapper[4753]: I0129 12:35:14.414947 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="f49f1749-fe9d-4a18-b81c-65ab628d882e" containerName="manager" Jan 29 12:35:14 crc kubenswrapper[4753]: E0129 12:35:14.414960 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a802cf6-528b-4923-baa7-a3eb25bf9bad" containerName="manager" Jan 29 12:35:14 crc kubenswrapper[4753]: I0129 12:35:14.414968 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a802cf6-528b-4923-baa7-a3eb25bf9bad" containerName="manager" Jan 29 12:35:14 crc kubenswrapper[4753]: E0129 12:35:14.414978 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc8c9706-0165-4cc6-ad22-bc70f03d5bf9" containerName="galera" Jan 29 12:35:14 crc kubenswrapper[4753]: I0129 12:35:14.414988 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc8c9706-0165-4cc6-ad22-bc70f03d5bf9" containerName="galera" Jan 29 12:35:14 crc kubenswrapper[4753]: E0129 12:35:14.414999 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c0945dd3-02c0-42cd-8a01-5ceba63d4fb7" containerName="registry-server" Jan 29 12:35:14 crc kubenswrapper[4753]: I0129 12:35:14.415008 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0945dd3-02c0-42cd-8a01-5ceba63d4fb7" containerName="registry-server" Jan 29 12:35:14 crc kubenswrapper[4753]: E0129 12:35:14.415019 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="01f1fcff-ebfe-4f44-9ed6-ea5308d4f2b6" containerName="registry-server" Jan 29 12:35:14 crc kubenswrapper[4753]: I0129 12:35:14.415028 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="01f1fcff-ebfe-4f44-9ed6-ea5308d4f2b6" containerName="registry-server" Jan 29 12:35:14 crc kubenswrapper[4753]: E0129 12:35:14.415045 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6762f214-cfd1-4314-9fa0-1d2b40e87b4e" containerName="galera" Jan 29 12:35:14 crc kubenswrapper[4753]: I0129 12:35:14.415053 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="6762f214-cfd1-4314-9fa0-1d2b40e87b4e" containerName="galera" Jan 29 12:35:14 crc kubenswrapper[4753]: E0129 12:35:14.415062 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74538c68-72a6-44a4-a906-3cf0a864a392" containerName="mariadb-account-delete" Jan 29 12:35:14 crc kubenswrapper[4753]: I0129 12:35:14.415070 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="74538c68-72a6-44a4-a906-3cf0a864a392" containerName="mariadb-account-delete" Jan 29 12:35:14 
crc kubenswrapper[4753]: E0129 12:35:14.415084 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89e38fc2-9f07-45f0-8cdc-b77931872d7b" containerName="mysql-bootstrap" Jan 29 12:35:14 crc kubenswrapper[4753]: I0129 12:35:14.415091 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="89e38fc2-9f07-45f0-8cdc-b77931872d7b" containerName="mysql-bootstrap" Jan 29 12:35:14 crc kubenswrapper[4753]: E0129 12:35:14.415102 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf" containerName="manager" Jan 29 12:35:14 crc kubenswrapper[4753]: I0129 12:35:14.415110 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf" containerName="manager" Jan 29 12:35:14 crc kubenswrapper[4753]: E0129 12:35:14.415122 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c73421ec-f2e6-4378-b4b5-6fcdcbee082b" containerName="registry-server" Jan 29 12:35:14 crc kubenswrapper[4753]: I0129 12:35:14.415142 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="c73421ec-f2e6-4378-b4b5-6fcdcbee082b" containerName="registry-server" Jan 29 12:35:14 crc kubenswrapper[4753]: E0129 12:35:14.415152 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6762f214-cfd1-4314-9fa0-1d2b40e87b4e" containerName="mysql-bootstrap" Jan 29 12:35:14 crc kubenswrapper[4753]: I0129 12:35:14.415160 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="6762f214-cfd1-4314-9fa0-1d2b40e87b4e" containerName="mysql-bootstrap" Jan 29 12:35:14 crc kubenswrapper[4753]: E0129 12:35:14.415172 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d77d4839-0017-46ba-8f19-d8ddc01a1878" containerName="manager" Jan 29 12:35:14 crc kubenswrapper[4753]: I0129 12:35:14.415180 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="d77d4839-0017-46ba-8f19-d8ddc01a1878" containerName="manager" Jan 29 12:35:14 crc kubenswrapper[4753]: E0129 12:35:14.415191 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3b46211c-99c7-44c9-8275-991e70edba9d" containerName="manager" Jan 29 12:35:14 crc kubenswrapper[4753]: I0129 12:35:14.415199 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="3b46211c-99c7-44c9-8275-991e70edba9d" containerName="manager" Jan 29 12:35:14 crc kubenswrapper[4753]: E0129 12:35:14.415214 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89e38fc2-9f07-45f0-8cdc-b77931872d7b" containerName="galera" Jan 29 12:35:14 crc kubenswrapper[4753]: I0129 12:35:14.415269 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="89e38fc2-9f07-45f0-8cdc-b77931872d7b" containerName="galera" Jan 29 12:35:14 crc kubenswrapper[4753]: I0129 12:35:14.415456 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc8c9706-0165-4cc6-ad22-bc70f03d5bf9" containerName="galera" Jan 29 12:35:14 crc kubenswrapper[4753]: I0129 12:35:14.415475 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a802cf6-528b-4923-baa7-a3eb25bf9bad" containerName="manager" Jan 29 12:35:14 crc kubenswrapper[4753]: I0129 12:35:14.415504 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="6762f214-cfd1-4314-9fa0-1d2b40e87b4e" containerName="galera" Jan 29 12:35:14 crc kubenswrapper[4753]: I0129 12:35:14.415517 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="d629e968-732d-4000-9ad9-16dceefa3077" containerName="registry-server" Jan 29 12:35:14 crc kubenswrapper[4753]: I0129 12:35:14.415525 4753 
memory_manager.go:354] "RemoveStaleState removing state" podUID="c0945dd3-02c0-42cd-8a01-5ceba63d4fb7" containerName="registry-server" Jan 29 12:35:14 crc kubenswrapper[4753]: I0129 12:35:14.415534 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="01f1fcff-ebfe-4f44-9ed6-ea5308d4f2b6" containerName="registry-server" Jan 29 12:35:14 crc kubenswrapper[4753]: I0129 12:35:14.415545 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="f49f1749-fe9d-4a18-b81c-65ab628d882e" containerName="manager" Jan 29 12:35:14 crc kubenswrapper[4753]: I0129 12:35:14.415557 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="e4d9ede8-e63a-4ba7-b3e4-057ff1298b91" containerName="operator" Jan 29 12:35:14 crc kubenswrapper[4753]: I0129 12:35:14.415567 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="74538c68-72a6-44a4-a906-3cf0a864a392" containerName="mariadb-account-delete" Jan 29 12:35:14 crc kubenswrapper[4753]: I0129 12:35:14.415577 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="3b46211c-99c7-44c9-8275-991e70edba9d" containerName="manager" Jan 29 12:35:14 crc kubenswrapper[4753]: I0129 12:35:14.415589 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="d77d4839-0017-46ba-8f19-d8ddc01a1878" containerName="manager" Jan 29 12:35:14 crc kubenswrapper[4753]: I0129 12:35:14.415599 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="6ad1bd9c-bfda-45eb-b89c-0ae519acdfdf" containerName="manager" Jan 29 12:35:14 crc kubenswrapper[4753]: I0129 12:35:14.415608 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd1f7149-df12-4c02-9585-238bfd26f12d" containerName="memcached" Jan 29 12:35:14 crc kubenswrapper[4753]: I0129 12:35:14.415621 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="89e38fc2-9f07-45f0-8cdc-b77931872d7b" containerName="galera" Jan 29 12:35:14 crc kubenswrapper[4753]: I0129 12:35:14.415631 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="c73421ec-f2e6-4378-b4b5-6fcdcbee082b" containerName="registry-server" Jan 29 12:35:14 crc kubenswrapper[4753]: I0129 12:35:14.415642 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="edf5d166-7a55-4ac0-ab98-ff0f503fd9a6" containerName="registry-server" Jan 29 12:35:14 crc kubenswrapper[4753]: I0129 12:35:14.415651 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a2cd3a2-57bb-4d30-8d10-666c40a8908d" containerName="registry-server" Jan 29 12:35:14 crc kubenswrapper[4753]: I0129 12:35:14.415661 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="e0033592-f961-4066-9026-3645b09f9524" containerName="rabbitmq" Jan 29 12:35:14 crc kubenswrapper[4753]: I0129 12:35:14.416712 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-mwvhx/must-gather-bb7qn" Jan 29 12:35:14 crc kubenswrapper[4753]: I0129 12:35:14.421437 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-mwvhx"/"kube-root-ca.crt" Jan 29 12:35:14 crc kubenswrapper[4753]: I0129 12:35:14.425349 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-mwvhx"/"openshift-service-ca.crt" Jan 29 12:35:14 crc kubenswrapper[4753]: I0129 12:35:14.433354 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-mwvhx/must-gather-bb7qn"] Jan 29 12:35:14 crc kubenswrapper[4753]: I0129 12:35:14.604154 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kglpn\" (UniqueName: \"kubernetes.io/projected/ea89829a-0669-4039-b610-18262b3eaf9f-kube-api-access-kglpn\") pod \"must-gather-bb7qn\" (UID: \"ea89829a-0669-4039-b610-18262b3eaf9f\") " pod="openshift-must-gather-mwvhx/must-gather-bb7qn" Jan 29 12:35:14 crc kubenswrapper[4753]: I0129 12:35:14.604486 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/ea89829a-0669-4039-b610-18262b3eaf9f-must-gather-output\") pod \"must-gather-bb7qn\" (UID: \"ea89829a-0669-4039-b610-18262b3eaf9f\") " pod="openshift-must-gather-mwvhx/must-gather-bb7qn" Jan 29 12:35:14 crc kubenswrapper[4753]: I0129 12:35:14.705627 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/ea89829a-0669-4039-b610-18262b3eaf9f-must-gather-output\") pod \"must-gather-bb7qn\" (UID: \"ea89829a-0669-4039-b610-18262b3eaf9f\") " pod="openshift-must-gather-mwvhx/must-gather-bb7qn" Jan 29 12:35:14 crc kubenswrapper[4753]: I0129 12:35:14.706197 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kglpn\" (UniqueName: \"kubernetes.io/projected/ea89829a-0669-4039-b610-18262b3eaf9f-kube-api-access-kglpn\") pod \"must-gather-bb7qn\" (UID: \"ea89829a-0669-4039-b610-18262b3eaf9f\") " pod="openshift-must-gather-mwvhx/must-gather-bb7qn" Jan 29 12:35:14 crc kubenswrapper[4753]: I0129 12:35:14.706445 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/ea89829a-0669-4039-b610-18262b3eaf9f-must-gather-output\") pod \"must-gather-bb7qn\" (UID: \"ea89829a-0669-4039-b610-18262b3eaf9f\") " pod="openshift-must-gather-mwvhx/must-gather-bb7qn" Jan 29 12:35:14 crc kubenswrapper[4753]: I0129 12:35:14.736006 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kglpn\" (UniqueName: \"kubernetes.io/projected/ea89829a-0669-4039-b610-18262b3eaf9f-kube-api-access-kglpn\") pod \"must-gather-bb7qn\" (UID: \"ea89829a-0669-4039-b610-18262b3eaf9f\") " pod="openshift-must-gather-mwvhx/must-gather-bb7qn" Jan 29 12:35:15 crc kubenswrapper[4753]: I0129 12:35:15.036114 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-mwvhx/must-gather-bb7qn" Jan 29 12:35:15 crc kubenswrapper[4753]: I0129 12:35:15.726958 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-mwvhx/must-gather-bb7qn"] Jan 29 12:35:16 crc kubenswrapper[4753]: I0129 12:35:16.500069 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-mwvhx/must-gather-bb7qn" event={"ID":"ea89829a-0669-4039-b610-18262b3eaf9f","Type":"ContainerStarted","Data":"585c0705364585d2cf2322283ecc2e129eb38a5b474d85b844d70126d4bb2899"} Jan 29 12:35:24 crc kubenswrapper[4753]: I0129 12:35:24.888313 4753 scope.go:117] "RemoveContainer" containerID="00211d2562b6632acb04decda9045cbb8d3216adca69cd5bc1380ec42b8211e0" Jan 29 12:35:24 crc kubenswrapper[4753]: E0129 12:35:24.889946 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7c24x_openshift-machine-config-operator(b0310995-a7c7-47c3-ae6c-05daaaba92a6)\"" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" Jan 29 12:35:25 crc kubenswrapper[4753]: I0129 12:35:25.844436 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-mwvhx/must-gather-bb7qn" event={"ID":"ea89829a-0669-4039-b610-18262b3eaf9f","Type":"ContainerStarted","Data":"b3b6a2f7fb9ddbfd564af86f2ea3264eda7d98c9b0359fd02ad09c69cb9fade4"} Jan 29 12:35:25 crc kubenswrapper[4753]: I0129 12:35:25.844701 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-mwvhx/must-gather-bb7qn" event={"ID":"ea89829a-0669-4039-b610-18262b3eaf9f","Type":"ContainerStarted","Data":"91f8811a03688aed81d0a3e6f48cd444c9b726139a4773cb07b3a70aaacc5f71"} Jan 29 12:35:25 crc kubenswrapper[4753]: I0129 12:35:25.864237 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-mwvhx/must-gather-bb7qn" podStartSLOduration=2.729835176 podStartE2EDuration="11.864181216s" podCreationTimestamp="2026-01-29 12:35:14 +0000 UTC" firstStartedPulling="2026-01-29 12:35:15.736084278 +0000 UTC m=+1729.988165743" lastFinishedPulling="2026-01-29 12:35:24.870430328 +0000 UTC m=+1739.122511783" observedRunningTime="2026-01-29 12:35:25.859186992 +0000 UTC m=+1740.111268447" watchObservedRunningTime="2026-01-29 12:35:25.864181216 +0000 UTC m=+1740.116262671" Jan 29 12:35:31 crc kubenswrapper[4753]: I0129 12:35:31.167337 4753 scope.go:117] "RemoveContainer" containerID="9493a081b6664e80a4fc55f550dc410692f87db4482ecc8ff21e5f596171479b" Jan 29 12:35:31 crc kubenswrapper[4753]: I0129 12:35:31.500456 4753 scope.go:117] "RemoveContainer" containerID="3f73ff64726c4e416c38dc71317be25c33816fc79a84cb44487910144496b139" Jan 29 12:35:31 crc kubenswrapper[4753]: I0129 12:35:31.548144 4753 scope.go:117] "RemoveContainer" containerID="a5287cd9f7b12d8a9969f6797a61d02c1feca9578af605310e5f5bb5209cc514" Jan 29 12:35:31 crc kubenswrapper[4753]: I0129 12:35:31.585163 4753 scope.go:117] "RemoveContainer" containerID="b26040703dd15e8e80da2fa0771fc55a873b8bd701de901596d2cfd5a55c1d0b" Jan 29 12:35:31 crc kubenswrapper[4753]: I0129 12:35:31.610153 4753 scope.go:117] "RemoveContainer" containerID="3a699a14372b8c8a305238e873dc0a0ada7481e1bf3b1ebe5d2c30194c5dc25a" Jan 29 12:35:31 crc kubenswrapper[4753]: I0129 12:35:31.840762 4753 scope.go:117] "RemoveContainer" 
containerID="214a7cdb6552aee101ffa784b2a2010895c902ea2346b722c0f9d1c73e432c87" Jan 29 12:35:31 crc kubenswrapper[4753]: I0129 12:35:31.860274 4753 scope.go:117] "RemoveContainer" containerID="5b75cff6b50c241634eb144f1655b8507425989ac137c4fa0c24e2364433fd8a" Jan 29 12:35:31 crc kubenswrapper[4753]: I0129 12:35:31.903264 4753 scope.go:117] "RemoveContainer" containerID="b85db201059a7d27aa1f4d5e8e377ef0658f80ca8fc1e9e04678ab46f32f8429" Jan 29 12:35:31 crc kubenswrapper[4753]: I0129 12:35:31.935576 4753 scope.go:117] "RemoveContainer" containerID="55a7e62f5e99631b1c13bcb20e4cc75a01c6c0b749948a47736ee4222a197102" Jan 29 12:35:31 crc kubenswrapper[4753]: I0129 12:35:31.954533 4753 scope.go:117] "RemoveContainer" containerID="23e39d78164420907cfe0f72f6db0fb34257618d5975d40e0199b96b4f1c5919" Jan 29 12:35:31 crc kubenswrapper[4753]: I0129 12:35:31.985500 4753 scope.go:117] "RemoveContainer" containerID="3505b43c9dc5670652003dbefee8db3985f456ddfae8884fb55461c8a8377f0c" Jan 29 12:35:32 crc kubenswrapper[4753]: I0129 12:35:32.006155 4753 scope.go:117] "RemoveContainer" containerID="1b59d0f2993ed03888c34a2e23832be3e53542a2eacb5e5294a37745a8f10697" Jan 29 12:35:32 crc kubenswrapper[4753]: I0129 12:35:32.027351 4753 scope.go:117] "RemoveContainer" containerID="996f3aece49f84b40de1877083ab8f46266c6ebe2de86803b43fa754ebc52b06" Jan 29 12:35:32 crc kubenswrapper[4753]: I0129 12:35:32.052571 4753 scope.go:117] "RemoveContainer" containerID="01bece6467d47e7482bfb93cf7ed09c408d0f3d2be5fc94c1777450152ea7b02" Jan 29 12:35:32 crc kubenswrapper[4753]: I0129 12:35:32.071802 4753 scope.go:117] "RemoveContainer" containerID="3bc8557324f62a6150da3afd5be16af1bb3af5b6eb84f912b02c6ee75e81b9e6" Jan 29 12:35:32 crc kubenswrapper[4753]: I0129 12:35:32.091479 4753 scope.go:117] "RemoveContainer" containerID="14a0794c4dd096cd14b5c2b51ef1d350510fda6035786468620cf1f2d1d70dc7" Jan 29 12:35:32 crc kubenswrapper[4753]: I0129 12:35:32.118151 4753 scope.go:117] "RemoveContainer" containerID="80cd849636ed48f0ac6878f7624b5a64c86305ea76c40e4b2d8a76897f16be26" Jan 29 12:35:32 crc kubenswrapper[4753]: I0129 12:35:32.139188 4753 scope.go:117] "RemoveContainer" containerID="216e929f3512a42b1815165bd3576440eeb076917ddbdfaabe86afb2bde69ed1" Jan 29 12:35:32 crc kubenswrapper[4753]: I0129 12:35:32.177976 4753 scope.go:117] "RemoveContainer" containerID="206092a3b35a5a4ec282d55dd57d178ea88f61a31810e16320d600723b4fbc46" Jan 29 12:35:32 crc kubenswrapper[4753]: I0129 12:35:32.258629 4753 scope.go:117] "RemoveContainer" containerID="61476c9e7ca135f2fe3fc971fa84cc1982d27a72d7c6e6dd268cf602b83b81b0" Jan 29 12:35:32 crc kubenswrapper[4753]: I0129 12:35:32.283056 4753 scope.go:117] "RemoveContainer" containerID="84bd8e96b7ca4bb2150e0c9bf51a214d128ba743a9a76d644f83028757efb8cf" Jan 29 12:35:32 crc kubenswrapper[4753]: I0129 12:35:32.326715 4753 scope.go:117] "RemoveContainer" containerID="f0979a0f20cf1dd20af8d2287b74ef55439e85192334188602ab9c867f565bc1" Jan 29 12:35:32 crc kubenswrapper[4753]: I0129 12:35:32.343892 4753 scope.go:117] "RemoveContainer" containerID="2e87f456e5cf6f1ead988353be01501ea3e5e591fcda20684f5468291e2da843" Jan 29 12:35:32 crc kubenswrapper[4753]: I0129 12:35:32.369072 4753 scope.go:117] "RemoveContainer" containerID="2d593656e256ee265e41ce2d191c01b17717b06fe5488bad59cfdb73f4e4de60" Jan 29 12:35:32 crc kubenswrapper[4753]: I0129 12:35:32.397914 4753 scope.go:117] "RemoveContainer" containerID="53b7eb75d85acb11ab4df368734c49a9684c81baf557ab0eaa764632ccc57550" Jan 29 12:35:32 crc kubenswrapper[4753]: I0129 12:35:32.415985 4753 
scope.go:117] "RemoveContainer" containerID="5a4fc950ff1f25f77d0dd675477676b8f9f5793292681d47966e5858e7b180ce" Jan 29 12:35:32 crc kubenswrapper[4753]: I0129 12:35:32.436079 4753 scope.go:117] "RemoveContainer" containerID="c8477d3a3cf990369d0f904142726f4c467abf348262470a40ead8e3bb718e21" Jan 29 12:35:32 crc kubenswrapper[4753]: I0129 12:35:32.454182 4753 scope.go:117] "RemoveContainer" containerID="62733507a6a0151e3bae922b7f73904e54600ead2093a140da3d91e0754f0092" Jan 29 12:35:38 crc kubenswrapper[4753]: I0129 12:35:38.888931 4753 scope.go:117] "RemoveContainer" containerID="00211d2562b6632acb04decda9045cbb8d3216adca69cd5bc1380ec42b8211e0" Jan 29 12:35:38 crc kubenswrapper[4753]: E0129 12:35:38.889821 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7c24x_openshift-machine-config-operator(b0310995-a7c7-47c3-ae6c-05daaaba92a6)\"" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" Jan 29 12:35:52 crc kubenswrapper[4753]: I0129 12:35:52.889080 4753 scope.go:117] "RemoveContainer" containerID="00211d2562b6632acb04decda9045cbb8d3216adca69cd5bc1380ec42b8211e0" Jan 29 12:35:52 crc kubenswrapper[4753]: E0129 12:35:52.889878 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7c24x_openshift-machine-config-operator(b0310995-a7c7-47c3-ae6c-05daaaba92a6)\"" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" Jan 29 12:36:07 crc kubenswrapper[4753]: I0129 12:36:07.891495 4753 scope.go:117] "RemoveContainer" containerID="00211d2562b6632acb04decda9045cbb8d3216adca69cd5bc1380ec42b8211e0" Jan 29 12:36:07 crc kubenswrapper[4753]: E0129 12:36:07.892582 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7c24x_openshift-machine-config-operator(b0310995-a7c7-47c3-ae6c-05daaaba92a6)\"" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" Jan 29 12:36:19 crc kubenswrapper[4753]: I0129 12:36:19.888784 4753 scope.go:117] "RemoveContainer" containerID="00211d2562b6632acb04decda9045cbb8d3216adca69cd5bc1380ec42b8211e0" Jan 29 12:36:19 crc kubenswrapper[4753]: E0129 12:36:19.889702 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7c24x_openshift-machine-config-operator(b0310995-a7c7-47c3-ae6c-05daaaba92a6)\"" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" Jan 29 12:36:21 crc kubenswrapper[4753]: I0129 12:36:21.322953 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-8dvz9_8fd9a883-0e5e-424f-bc31-199f2103c548/control-plane-machine-set-operator/0.log" Jan 29 12:36:21 crc kubenswrapper[4753]: I0129 12:36:21.410567 4753 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-s4nq8_2d66d4a7-6db6-438c-9d2b-cb2bb0ef846b/kube-rbac-proxy/0.log" Jan 29 12:36:21 crc kubenswrapper[4753]: I0129 12:36:21.494880 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-s4nq8_2d66d4a7-6db6-438c-9d2b-cb2bb0ef846b/machine-api-operator/0.log" Jan 29 12:36:32 crc kubenswrapper[4753]: I0129 12:36:32.909837 4753 scope.go:117] "RemoveContainer" containerID="2fad35715b86b9eae4b2cc0fbe7526b7d0ebdcb67b38c4796c612811ee2907ae" Jan 29 12:36:32 crc kubenswrapper[4753]: I0129 12:36:32.950014 4753 scope.go:117] "RemoveContainer" containerID="e2f9eb2e7c75f4680190aecbec04893b350096f32936813d869cd07a1d19acb3" Jan 29 12:36:32 crc kubenswrapper[4753]: I0129 12:36:32.974752 4753 scope.go:117] "RemoveContainer" containerID="3791fb6ba9c11ddfe0a0e2b33aa5070e1fa3a259202dd953348f149fe308359c" Jan 29 12:36:34 crc kubenswrapper[4753]: I0129 12:36:34.888334 4753 scope.go:117] "RemoveContainer" containerID="00211d2562b6632acb04decda9045cbb8d3216adca69cd5bc1380ec42b8211e0" Jan 29 12:36:34 crc kubenswrapper[4753]: E0129 12:36:34.888875 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7c24x_openshift-machine-config-operator(b0310995-a7c7-47c3-ae6c-05daaaba92a6)\"" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" Jan 29 12:36:46 crc kubenswrapper[4753]: I0129 12:36:46.888745 4753 scope.go:117] "RemoveContainer" containerID="00211d2562b6632acb04decda9045cbb8d3216adca69cd5bc1380ec42b8211e0" Jan 29 12:36:46 crc kubenswrapper[4753]: E0129 12:36:46.889420 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7c24x_openshift-machine-config-operator(b0310995-a7c7-47c3-ae6c-05daaaba92a6)\"" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" Jan 29 12:36:51 crc kubenswrapper[4753]: I0129 12:36:51.868424 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-n9w5v_3135e3f4-69fe-446f-9778-3e77d2b07dbf/kube-rbac-proxy/0.log" Jan 29 12:36:51 crc kubenswrapper[4753]: I0129 12:36:51.879487 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-n9w5v_3135e3f4-69fe-446f-9778-3e77d2b07dbf/controller/0.log" Jan 29 12:36:52 crc kubenswrapper[4753]: I0129 12:36:52.037004 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7df86c4f6c-g8vcz_e87775ff-faac-474c-814f-6873998fc276/frr-k8s-webhook-server/0.log" Jan 29 12:36:52 crc kubenswrapper[4753]: I0129 12:36:52.146868 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-z5drz_4b689b8d-ccc2-49e6-a449-c77e354f5f86/cp-frr-files/0.log" Jan 29 12:36:52 crc kubenswrapper[4753]: I0129 12:36:52.260191 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-z5drz_4b689b8d-ccc2-49e6-a449-c77e354f5f86/cp-frr-files/0.log" Jan 29 12:36:52 crc kubenswrapper[4753]: I0129 12:36:52.279594 4753 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_frr-k8s-z5drz_4b689b8d-ccc2-49e6-a449-c77e354f5f86/cp-metrics/0.log" Jan 29 12:36:52 crc kubenswrapper[4753]: I0129 12:36:52.297878 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-z5drz_4b689b8d-ccc2-49e6-a449-c77e354f5f86/cp-reloader/0.log" Jan 29 12:36:52 crc kubenswrapper[4753]: I0129 12:36:52.344522 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-z5drz_4b689b8d-ccc2-49e6-a449-c77e354f5f86/cp-reloader/0.log" Jan 29 12:36:52 crc kubenswrapper[4753]: I0129 12:36:52.526615 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-z5drz_4b689b8d-ccc2-49e6-a449-c77e354f5f86/cp-reloader/0.log" Jan 29 12:36:52 crc kubenswrapper[4753]: I0129 12:36:52.536570 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-z5drz_4b689b8d-ccc2-49e6-a449-c77e354f5f86/cp-metrics/0.log" Jan 29 12:36:52 crc kubenswrapper[4753]: I0129 12:36:52.542256 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-z5drz_4b689b8d-ccc2-49e6-a449-c77e354f5f86/cp-metrics/0.log" Jan 29 12:36:52 crc kubenswrapper[4753]: I0129 12:36:52.542635 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-z5drz_4b689b8d-ccc2-49e6-a449-c77e354f5f86/cp-frr-files/0.log" Jan 29 12:36:52 crc kubenswrapper[4753]: I0129 12:36:52.713789 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-z5drz_4b689b8d-ccc2-49e6-a449-c77e354f5f86/cp-reloader/0.log" Jan 29 12:36:52 crc kubenswrapper[4753]: I0129 12:36:52.714489 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-z5drz_4b689b8d-ccc2-49e6-a449-c77e354f5f86/cp-metrics/0.log" Jan 29 12:36:52 crc kubenswrapper[4753]: I0129 12:36:52.731691 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-z5drz_4b689b8d-ccc2-49e6-a449-c77e354f5f86/controller/0.log" Jan 29 12:36:52 crc kubenswrapper[4753]: I0129 12:36:52.734448 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-z5drz_4b689b8d-ccc2-49e6-a449-c77e354f5f86/cp-frr-files/0.log" Jan 29 12:36:52 crc kubenswrapper[4753]: I0129 12:36:52.915364 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-z5drz_4b689b8d-ccc2-49e6-a449-c77e354f5f86/frr-metrics/0.log" Jan 29 12:36:52 crc kubenswrapper[4753]: I0129 12:36:52.918688 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-z5drz_4b689b8d-ccc2-49e6-a449-c77e354f5f86/kube-rbac-proxy-frr/0.log" Jan 29 12:36:52 crc kubenswrapper[4753]: I0129 12:36:52.940533 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-z5drz_4b689b8d-ccc2-49e6-a449-c77e354f5f86/kube-rbac-proxy/0.log" Jan 29 12:36:53 crc kubenswrapper[4753]: I0129 12:36:53.091356 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-z5drz_4b689b8d-ccc2-49e6-a449-c77e354f5f86/reloader/0.log" Jan 29 12:36:53 crc kubenswrapper[4753]: I0129 12:36:53.180072 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-68df96bfd5-7k5hh_3695d573-e0e3-469a-b536-6ed32ade8e82/manager/0.log" Jan 29 12:36:53 crc kubenswrapper[4753]: I0129 12:36:53.397734 4753 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_metallb-operator-webhook-server-6787488fdd-zv44d_f743a92e-c395-4123-b0ed-58237f1b1c8f/webhook-server/0.log" Jan 29 12:36:53 crc kubenswrapper[4753]: I0129 12:36:53.514414 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-5fz59_f59f410e-2d13-4bbc-aca6-c50536d77905/kube-rbac-proxy/0.log" Jan 29 12:36:53 crc kubenswrapper[4753]: I0129 12:36:53.536706 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-z5drz_4b689b8d-ccc2-49e6-a449-c77e354f5f86/frr/0.log" Jan 29 12:36:53 crc kubenswrapper[4753]: I0129 12:36:53.791372 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-5fz59_f59f410e-2d13-4bbc-aca6-c50536d77905/speaker/0.log" Jan 29 12:37:00 crc kubenswrapper[4753]: I0129 12:37:00.888731 4753 scope.go:117] "RemoveContainer" containerID="00211d2562b6632acb04decda9045cbb8d3216adca69cd5bc1380ec42b8211e0" Jan 29 12:37:00 crc kubenswrapper[4753]: E0129 12:37:00.889511 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7c24x_openshift-machine-config-operator(b0310995-a7c7-47c3-ae6c-05daaaba92a6)\"" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" Jan 29 12:37:13 crc kubenswrapper[4753]: I0129 12:37:13.889004 4753 scope.go:117] "RemoveContainer" containerID="00211d2562b6632acb04decda9045cbb8d3216adca69cd5bc1380ec42b8211e0" Jan 29 12:37:13 crc kubenswrapper[4753]: E0129 12:37:13.890020 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7c24x_openshift-machine-config-operator(b0310995-a7c7-47c3-ae6c-05daaaba92a6)\"" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" Jan 29 12:37:21 crc kubenswrapper[4753]: I0129 12:37:21.138925 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7b4vm_01efdd65-431c-4f40-b00f-00cc75ed4682/util/0.log" Jan 29 12:37:21 crc kubenswrapper[4753]: I0129 12:37:21.139361 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7b4vm_01efdd65-431c-4f40-b00f-00cc75ed4682/pull/0.log" Jan 29 12:37:21 crc kubenswrapper[4753]: I0129 12:37:21.140060 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7b4vm_01efdd65-431c-4f40-b00f-00cc75ed4682/util/0.log" Jan 29 12:37:21 crc kubenswrapper[4753]: I0129 12:37:21.141413 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7b4vm_01efdd65-431c-4f40-b00f-00cc75ed4682/pull/0.log" Jan 29 12:37:21 crc kubenswrapper[4753]: I0129 12:37:21.344589 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7b4vm_01efdd65-431c-4f40-b00f-00cc75ed4682/extract/0.log" Jan 29 12:37:21 crc kubenswrapper[4753]: I0129 12:37:21.498650 4753 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_certified-operators-892wz_30b357a9-c353-4290-92a0-cbec35e161a0/extract-utilities/0.log" Jan 29 12:37:21 crc kubenswrapper[4753]: I0129 12:37:21.515565 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7b4vm_01efdd65-431c-4f40-b00f-00cc75ed4682/pull/0.log" Jan 29 12:37:21 crc kubenswrapper[4753]: I0129 12:37:21.542283 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7b4vm_01efdd65-431c-4f40-b00f-00cc75ed4682/util/0.log" Jan 29 12:37:21 crc kubenswrapper[4753]: I0129 12:37:21.879214 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-892wz_30b357a9-c353-4290-92a0-cbec35e161a0/extract-utilities/0.log" Jan 29 12:37:21 crc kubenswrapper[4753]: I0129 12:37:21.884583 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-892wz_30b357a9-c353-4290-92a0-cbec35e161a0/extract-content/0.log" Jan 29 12:37:21 crc kubenswrapper[4753]: I0129 12:37:21.915137 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-892wz_30b357a9-c353-4290-92a0-cbec35e161a0/extract-content/0.log" Jan 29 12:37:22 crc kubenswrapper[4753]: I0129 12:37:22.133393 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-892wz_30b357a9-c353-4290-92a0-cbec35e161a0/extract-utilities/0.log" Jan 29 12:37:22 crc kubenswrapper[4753]: I0129 12:37:22.141445 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-892wz_30b357a9-c353-4290-92a0-cbec35e161a0/extract-content/0.log" Jan 29 12:37:22 crc kubenswrapper[4753]: I0129 12:37:22.337939 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-zscj5_1008e065-fc17-492d-8775-ced31b31aa22/extract-utilities/0.log" Jan 29 12:37:22 crc kubenswrapper[4753]: I0129 12:37:22.654005 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-zscj5_1008e065-fc17-492d-8775-ced31b31aa22/extract-content/0.log" Jan 29 12:37:22 crc kubenswrapper[4753]: I0129 12:37:22.654158 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-zscj5_1008e065-fc17-492d-8775-ced31b31aa22/extract-content/0.log" Jan 29 12:37:22 crc kubenswrapper[4753]: I0129 12:37:22.696506 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-zscj5_1008e065-fc17-492d-8775-ced31b31aa22/extract-utilities/0.log" Jan 29 12:37:22 crc kubenswrapper[4753]: I0129 12:37:22.743530 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-892wz_30b357a9-c353-4290-92a0-cbec35e161a0/registry-server/0.log" Jan 29 12:37:23 crc kubenswrapper[4753]: I0129 12:37:23.053648 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-zscj5_1008e065-fc17-492d-8775-ced31b31aa22/extract-content/0.log" Jan 29 12:37:23 crc kubenswrapper[4753]: I0129 12:37:23.191424 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-zscj5_1008e065-fc17-492d-8775-ced31b31aa22/extract-utilities/0.log" Jan 29 12:37:23 crc kubenswrapper[4753]: I0129 12:37:23.358181 4753 
log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-kmlbh_8c12a149-f214-43c8-a40d-57c615cbe69e/marketplace-operator/0.log" Jan 29 12:37:23 crc kubenswrapper[4753]: I0129 12:37:23.400719 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-52zgz_ea63566b-9a3b-4a69-aaba-18e93b24f5c3/extract-utilities/0.log" Jan 29 12:37:23 crc kubenswrapper[4753]: I0129 12:37:23.700329 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-52zgz_ea63566b-9a3b-4a69-aaba-18e93b24f5c3/extract-utilities/0.log" Jan 29 12:37:23 crc kubenswrapper[4753]: I0129 12:37:23.706257 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-52zgz_ea63566b-9a3b-4a69-aaba-18e93b24f5c3/extract-content/0.log" Jan 29 12:37:23 crc kubenswrapper[4753]: I0129 12:37:23.729273 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-zscj5_1008e065-fc17-492d-8775-ced31b31aa22/registry-server/0.log" Jan 29 12:37:23 crc kubenswrapper[4753]: I0129 12:37:23.741591 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-52zgz_ea63566b-9a3b-4a69-aaba-18e93b24f5c3/extract-content/0.log" Jan 29 12:37:23 crc kubenswrapper[4753]: I0129 12:37:23.870519 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-52zgz_ea63566b-9a3b-4a69-aaba-18e93b24f5c3/extract-utilities/0.log" Jan 29 12:37:23 crc kubenswrapper[4753]: I0129 12:37:23.916097 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-52zgz_ea63566b-9a3b-4a69-aaba-18e93b24f5c3/extract-content/0.log" Jan 29 12:37:23 crc kubenswrapper[4753]: I0129 12:37:23.983317 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-52zgz_ea63566b-9a3b-4a69-aaba-18e93b24f5c3/registry-server/0.log" Jan 29 12:37:24 crc kubenswrapper[4753]: I0129 12:37:24.063896 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-5qfcb_4b6c93ff-5f67-421a-beca-c1b588d8535d/extract-utilities/0.log" Jan 29 12:37:24 crc kubenswrapper[4753]: I0129 12:37:24.264071 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-5qfcb_4b6c93ff-5f67-421a-beca-c1b588d8535d/extract-content/0.log" Jan 29 12:37:24 crc kubenswrapper[4753]: I0129 12:37:24.264319 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-5qfcb_4b6c93ff-5f67-421a-beca-c1b588d8535d/extract-content/0.log" Jan 29 12:37:24 crc kubenswrapper[4753]: I0129 12:37:24.266255 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-5qfcb_4b6c93ff-5f67-421a-beca-c1b588d8535d/extract-utilities/0.log" Jan 29 12:37:24 crc kubenswrapper[4753]: I0129 12:37:24.637485 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-5qfcb_4b6c93ff-5f67-421a-beca-c1b588d8535d/extract-utilities/0.log" Jan 29 12:37:24 crc kubenswrapper[4753]: I0129 12:37:24.648088 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-5qfcb_4b6c93ff-5f67-421a-beca-c1b588d8535d/extract-content/0.log" Jan 29 12:37:25 crc kubenswrapper[4753]: I0129 12:37:25.392012 4753 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-5qfcb_4b6c93ff-5f67-421a-beca-c1b588d8535d/registry-server/0.log" Jan 29 12:37:28 crc kubenswrapper[4753]: I0129 12:37:28.889454 4753 scope.go:117] "RemoveContainer" containerID="00211d2562b6632acb04decda9045cbb8d3216adca69cd5bc1380ec42b8211e0" Jan 29 12:37:28 crc kubenswrapper[4753]: E0129 12:37:28.891110 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7c24x_openshift-machine-config-operator(b0310995-a7c7-47c3-ae6c-05daaaba92a6)\"" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" Jan 29 12:37:33 crc kubenswrapper[4753]: I0129 12:37:33.090408 4753 scope.go:117] "RemoveContainer" containerID="16c189b240c3b6af0b5143c94f27ad8f278cf55c71fde6ff59b1123203d274ec" Jan 29 12:37:33 crc kubenswrapper[4753]: I0129 12:37:33.117274 4753 scope.go:117] "RemoveContainer" containerID="512e69e3786e84e63a587d8d9745a6408e329502de45c30a1be8a2813fce175e" Jan 29 12:37:33 crc kubenswrapper[4753]: I0129 12:37:33.143086 4753 scope.go:117] "RemoveContainer" containerID="d29a9d10061279a2805ca6403127e650b1bbfe36a4163df4e079fae54c5a5970" Jan 29 12:37:33 crc kubenswrapper[4753]: I0129 12:37:33.161112 4753 scope.go:117] "RemoveContainer" containerID="0aec20004c67a50d19a5c8d1cf7ed9018b4b95c39a5978455971cdd8179addd1" Jan 29 12:37:33 crc kubenswrapper[4753]: I0129 12:37:33.179768 4753 scope.go:117] "RemoveContainer" containerID="49988160277c5cc45dd8900eb16d03199ec0c4ffa40e5904a5dbb7b9d2078f17" Jan 29 12:37:33 crc kubenswrapper[4753]: I0129 12:37:33.199719 4753 scope.go:117] "RemoveContainer" containerID="e0df4d24defb12bbb6148384cb20d00499a3423ed6132f66831d8d0399b66e27" Jan 29 12:37:33 crc kubenswrapper[4753]: I0129 12:37:33.217941 4753 scope.go:117] "RemoveContainer" containerID="d960e7df489abcbee5c2ce5c0b0cfbaf0f0c2dac4e26fbf6f15ad27ca9f3ea32" Jan 29 12:37:33 crc kubenswrapper[4753]: I0129 12:37:33.260163 4753 scope.go:117] "RemoveContainer" containerID="f06ef499c931eccca294f5cfa38f6da4b4d8519bacce3cc18ebb231c7e9803a7" Jan 29 12:37:33 crc kubenswrapper[4753]: I0129 12:37:33.289771 4753 scope.go:117] "RemoveContainer" containerID="e93a1c2eebca268c75cadde6e248adb908e05fb31946984b83eff825afbd1777" Jan 29 12:37:33 crc kubenswrapper[4753]: I0129 12:37:33.302911 4753 scope.go:117] "RemoveContainer" containerID="54791b5e4f8e371f9007a17143cbc5838793bd8d3e1ce65c2822161e12d6146c" Jan 29 12:37:33 crc kubenswrapper[4753]: I0129 12:37:33.315818 4753 scope.go:117] "RemoveContainer" containerID="194bd65bd1293b044532e09dbd55bdc880c52603b4ee58046654673e07e25e17" Jan 29 12:37:33 crc kubenswrapper[4753]: I0129 12:37:33.332531 4753 scope.go:117] "RemoveContainer" containerID="f0eddfb27e0b1ca03ff86905f0d1b81d674be9160c615a4f016f519d650869e7" Jan 29 12:37:33 crc kubenswrapper[4753]: I0129 12:37:33.352129 4753 scope.go:117] "RemoveContainer" containerID="4f9f3bd4b5dcda78683a4fd9f6169c0ba4cacc4f6a05deb5fb5b67519e1959ef" Jan 29 12:37:33 crc kubenswrapper[4753]: I0129 12:37:33.371826 4753 scope.go:117] "RemoveContainer" containerID="6d62737aaefa0ae329ab69794e4a09fdbf4a1956c0ade81f0a19a89a0f563158" Jan 29 12:37:33 crc kubenswrapper[4753]: I0129 12:37:33.392700 4753 scope.go:117] "RemoveContainer" containerID="af1ebe0bf77572149389aa0735cd954615cb960c22f2d09604e56b99ece70e08" Jan 29 12:37:33 crc kubenswrapper[4753]: I0129 
Jan 29 12:37:33 crc kubenswrapper[4753]: I0129 12:37:33.425996 4753 scope.go:117] "RemoveContainer" containerID="7375ec7e5486a51f1304b60d7e06caa5a53e836b083d9d627059439991053ffd"
Jan 29 12:37:33 crc kubenswrapper[4753]: I0129 12:37:33.443765 4753 scope.go:117] "RemoveContainer" containerID="3d01ab550711d3f6ed3858797bfec7a7069bc75766d3c545e26de6dea09f5fb8"
Jan 29 12:37:33 crc kubenswrapper[4753]: I0129 12:37:33.458387 4753 scope.go:117] "RemoveContainer" containerID="b626d4cf4f9f12b19febd2612ba28c38240c1ae05ccad16175116f001f711318"
Jan 29 12:37:33 crc kubenswrapper[4753]: I0129 12:37:33.524419 4753 scope.go:117] "RemoveContainer" containerID="b028c59c7810639143318f825119fe54eacc1a661cb573dc2455ed7e2ba850ed"
Jan 29 12:37:42 crc kubenswrapper[4753]: I0129 12:37:42.888448 4753 scope.go:117] "RemoveContainer" containerID="00211d2562b6632acb04decda9045cbb8d3216adca69cd5bc1380ec42b8211e0"
Jan 29 12:37:42 crc kubenswrapper[4753]: E0129 12:37:42.889294 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7c24x_openshift-machine-config-operator(b0310995-a7c7-47c3-ae6c-05daaaba92a6)\"" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6"
Jan 29 12:37:55 crc kubenswrapper[4753]: I0129 12:37:55.888419 4753 scope.go:117] "RemoveContainer" containerID="00211d2562b6632acb04decda9045cbb8d3216adca69cd5bc1380ec42b8211e0"
Jan 29 12:37:55 crc kubenswrapper[4753]: E0129 12:37:55.889181 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7c24x_openshift-machine-config-operator(b0310995-a7c7-47c3-ae6c-05daaaba92a6)\"" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6"
Jan 29 12:38:04 crc kubenswrapper[4753]: I0129 12:38:04.677296 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-5mhnb"]
Jan 29 12:38:04 crc kubenswrapper[4753]: I0129 12:38:04.680555 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5mhnb"
Jan 29 12:38:04 crc kubenswrapper[4753]: I0129 12:38:04.704783 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-5mhnb"]
Jan 29 12:38:04 crc kubenswrapper[4753]: I0129 12:38:04.721538 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/01b1fe22-8cf8-47a3-8b11-fc245c8eed6b-utilities\") pod \"redhat-marketplace-5mhnb\" (UID: \"01b1fe22-8cf8-47a3-8b11-fc245c8eed6b\") " pod="openshift-marketplace/redhat-marketplace-5mhnb"
Jan 29 12:38:04 crc kubenswrapper[4753]: I0129 12:38:04.721608 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/01b1fe22-8cf8-47a3-8b11-fc245c8eed6b-catalog-content\") pod \"redhat-marketplace-5mhnb\" (UID: \"01b1fe22-8cf8-47a3-8b11-fc245c8eed6b\") " pod="openshift-marketplace/redhat-marketplace-5mhnb"
Jan 29 12:38:04 crc kubenswrapper[4753]: I0129 12:38:04.721700 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8c28h\" (UniqueName: \"kubernetes.io/projected/01b1fe22-8cf8-47a3-8b11-fc245c8eed6b-kube-api-access-8c28h\") pod \"redhat-marketplace-5mhnb\" (UID: \"01b1fe22-8cf8-47a3-8b11-fc245c8eed6b\") " pod="openshift-marketplace/redhat-marketplace-5mhnb"
Jan 29 12:38:04 crc kubenswrapper[4753]: I0129 12:38:04.823085 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/01b1fe22-8cf8-47a3-8b11-fc245c8eed6b-utilities\") pod \"redhat-marketplace-5mhnb\" (UID: \"01b1fe22-8cf8-47a3-8b11-fc245c8eed6b\") " pod="openshift-marketplace/redhat-marketplace-5mhnb"
Jan 29 12:38:04 crc kubenswrapper[4753]: I0129 12:38:04.823167 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/01b1fe22-8cf8-47a3-8b11-fc245c8eed6b-catalog-content\") pod \"redhat-marketplace-5mhnb\" (UID: \"01b1fe22-8cf8-47a3-8b11-fc245c8eed6b\") " pod="openshift-marketplace/redhat-marketplace-5mhnb"
Jan 29 12:38:04 crc kubenswrapper[4753]: I0129 12:38:04.823278 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8c28h\" (UniqueName: \"kubernetes.io/projected/01b1fe22-8cf8-47a3-8b11-fc245c8eed6b-kube-api-access-8c28h\") pod \"redhat-marketplace-5mhnb\" (UID: \"01b1fe22-8cf8-47a3-8b11-fc245c8eed6b\") " pod="openshift-marketplace/redhat-marketplace-5mhnb"
Jan 29 12:38:04 crc kubenswrapper[4753]: I0129 12:38:04.940910 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/01b1fe22-8cf8-47a3-8b11-fc245c8eed6b-catalog-content\") pod \"redhat-marketplace-5mhnb\" (UID: \"01b1fe22-8cf8-47a3-8b11-fc245c8eed6b\") " pod="openshift-marketplace/redhat-marketplace-5mhnb"
Jan 29 12:38:04 crc kubenswrapper[4753]: I0129 12:38:04.941273 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/01b1fe22-8cf8-47a3-8b11-fc245c8eed6b-utilities\") pod \"redhat-marketplace-5mhnb\" (UID: \"01b1fe22-8cf8-47a3-8b11-fc245c8eed6b\") " pod="openshift-marketplace/redhat-marketplace-5mhnb"
Jan 29 12:38:04 crc kubenswrapper[4753]: I0129 12:38:04.966212 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8c28h\" (UniqueName: \"kubernetes.io/projected/01b1fe22-8cf8-47a3-8b11-fc245c8eed6b-kube-api-access-8c28h\") pod \"redhat-marketplace-5mhnb\" (UID: \"01b1fe22-8cf8-47a3-8b11-fc245c8eed6b\") " pod="openshift-marketplace/redhat-marketplace-5mhnb"
Jan 29 12:38:05 crc kubenswrapper[4753]: I0129 12:38:05.024682 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5mhnb"
Jan 29 12:38:05 crc kubenswrapper[4753]: I0129 12:38:05.710873 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-5mhnb"]
Jan 29 12:38:06 crc kubenswrapper[4753]: I0129 12:38:06.139320 4753 generic.go:334] "Generic (PLEG): container finished" podID="01b1fe22-8cf8-47a3-8b11-fc245c8eed6b" containerID="fd9db5da531f48a8ff4062bb7f8d38fb1199bcdda6346c77b7564217a8dac899" exitCode=0
Jan 29 12:38:06 crc kubenswrapper[4753]: I0129 12:38:06.139430 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5mhnb" event={"ID":"01b1fe22-8cf8-47a3-8b11-fc245c8eed6b","Type":"ContainerDied","Data":"fd9db5da531f48a8ff4062bb7f8d38fb1199bcdda6346c77b7564217a8dac899"}
Jan 29 12:38:06 crc kubenswrapper[4753]: I0129 12:38:06.139572 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5mhnb" event={"ID":"01b1fe22-8cf8-47a3-8b11-fc245c8eed6b","Type":"ContainerStarted","Data":"de654295d97b1106968e6f73c021bba8c7936728cd5b28ebe9b27a8ead8b14b0"}
Jan 29 12:38:06 crc kubenswrapper[4753]: I0129 12:38:06.141097 4753 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 29 12:38:07 crc kubenswrapper[4753]: I0129 12:38:07.147177 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5mhnb" event={"ID":"01b1fe22-8cf8-47a3-8b11-fc245c8eed6b","Type":"ContainerStarted","Data":"481a3c7d2ac60bd3b90c161fb5079cdee2aae45823f2f6fec0bb7b27fe398831"}
Jan 29 12:38:08 crc kubenswrapper[4753]: I0129 12:38:08.160651 4753 generic.go:334] "Generic (PLEG): container finished" podID="01b1fe22-8cf8-47a3-8b11-fc245c8eed6b" containerID="481a3c7d2ac60bd3b90c161fb5079cdee2aae45823f2f6fec0bb7b27fe398831" exitCode=0
Jan 29 12:38:08 crc kubenswrapper[4753]: I0129 12:38:08.160728 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5mhnb" event={"ID":"01b1fe22-8cf8-47a3-8b11-fc245c8eed6b","Type":"ContainerDied","Data":"481a3c7d2ac60bd3b90c161fb5079cdee2aae45823f2f6fec0bb7b27fe398831"}
Jan 29 12:38:09 crc kubenswrapper[4753]: I0129 12:38:09.170426 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5mhnb" event={"ID":"01b1fe22-8cf8-47a3-8b11-fc245c8eed6b","Type":"ContainerStarted","Data":"bc37c07bde37a849111a9d821ce0022bd6c2f38281081e2d015ebf2ef8708bfd"}
m=+1903.459181413" Jan 29 12:38:09 crc kubenswrapper[4753]: I0129 12:38:09.888634 4753 scope.go:117] "RemoveContainer" containerID="00211d2562b6632acb04decda9045cbb8d3216adca69cd5bc1380ec42b8211e0" Jan 29 12:38:10 crc kubenswrapper[4753]: I0129 12:38:10.179426 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" event={"ID":"b0310995-a7c7-47c3-ae6c-05daaaba92a6","Type":"ContainerStarted","Data":"68d8fb44353981db7ed428c16c1be1006209ecd529ce7dc45390bb208bf8ee08"} Jan 29 12:38:15 crc kubenswrapper[4753]: I0129 12:38:15.024877 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-5mhnb" Jan 29 12:38:15 crc kubenswrapper[4753]: I0129 12:38:15.027788 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-5mhnb" Jan 29 12:38:15 crc kubenswrapper[4753]: I0129 12:38:15.068740 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-5mhnb" Jan 29 12:38:15 crc kubenswrapper[4753]: I0129 12:38:15.326343 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-5mhnb" Jan 29 12:38:15 crc kubenswrapper[4753]: I0129 12:38:15.372862 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-5mhnb"] Jan 29 12:38:17 crc kubenswrapper[4753]: I0129 12:38:17.309590 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-5mhnb" podUID="01b1fe22-8cf8-47a3-8b11-fc245c8eed6b" containerName="registry-server" containerID="cri-o://bc37c07bde37a849111a9d821ce0022bd6c2f38281081e2d015ebf2ef8708bfd" gracePeriod=2 Jan 29 12:38:17 crc kubenswrapper[4753]: I0129 12:38:17.690558 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5mhnb" Jan 29 12:38:17 crc kubenswrapper[4753]: I0129 12:38:17.722709 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/01b1fe22-8cf8-47a3-8b11-fc245c8eed6b-catalog-content\") pod \"01b1fe22-8cf8-47a3-8b11-fc245c8eed6b\" (UID: \"01b1fe22-8cf8-47a3-8b11-fc245c8eed6b\") " Jan 29 12:38:17 crc kubenswrapper[4753]: I0129 12:38:17.722805 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8c28h\" (UniqueName: \"kubernetes.io/projected/01b1fe22-8cf8-47a3-8b11-fc245c8eed6b-kube-api-access-8c28h\") pod \"01b1fe22-8cf8-47a3-8b11-fc245c8eed6b\" (UID: \"01b1fe22-8cf8-47a3-8b11-fc245c8eed6b\") " Jan 29 12:38:17 crc kubenswrapper[4753]: I0129 12:38:17.722888 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/01b1fe22-8cf8-47a3-8b11-fc245c8eed6b-utilities\") pod \"01b1fe22-8cf8-47a3-8b11-fc245c8eed6b\" (UID: \"01b1fe22-8cf8-47a3-8b11-fc245c8eed6b\") " Jan 29 12:38:17 crc kubenswrapper[4753]: I0129 12:38:17.724032 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/01b1fe22-8cf8-47a3-8b11-fc245c8eed6b-utilities" (OuterVolumeSpecName: "utilities") pod "01b1fe22-8cf8-47a3-8b11-fc245c8eed6b" (UID: "01b1fe22-8cf8-47a3-8b11-fc245c8eed6b"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:38:17 crc kubenswrapper[4753]: I0129 12:38:17.730243 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01b1fe22-8cf8-47a3-8b11-fc245c8eed6b-kube-api-access-8c28h" (OuterVolumeSpecName: "kube-api-access-8c28h") pod "01b1fe22-8cf8-47a3-8b11-fc245c8eed6b" (UID: "01b1fe22-8cf8-47a3-8b11-fc245c8eed6b"). InnerVolumeSpecName "kube-api-access-8c28h". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:38:17 crc kubenswrapper[4753]: I0129 12:38:17.769480 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/01b1fe22-8cf8-47a3-8b11-fc245c8eed6b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "01b1fe22-8cf8-47a3-8b11-fc245c8eed6b" (UID: "01b1fe22-8cf8-47a3-8b11-fc245c8eed6b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:38:17 crc kubenswrapper[4753]: I0129 12:38:17.823527 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8c28h\" (UniqueName: \"kubernetes.io/projected/01b1fe22-8cf8-47a3-8b11-fc245c8eed6b-kube-api-access-8c28h\") on node \"crc\" DevicePath \"\"" Jan 29 12:38:17 crc kubenswrapper[4753]: I0129 12:38:17.823835 4753 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/01b1fe22-8cf8-47a3-8b11-fc245c8eed6b-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 12:38:17 crc kubenswrapper[4753]: I0129 12:38:17.823905 4753 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/01b1fe22-8cf8-47a3-8b11-fc245c8eed6b-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 12:38:18 crc kubenswrapper[4753]: I0129 12:38:18.318299 4753 generic.go:334] "Generic (PLEG): container finished" podID="01b1fe22-8cf8-47a3-8b11-fc245c8eed6b" containerID="bc37c07bde37a849111a9d821ce0022bd6c2f38281081e2d015ebf2ef8708bfd" exitCode=0 Jan 29 12:38:18 crc kubenswrapper[4753]: I0129 12:38:18.318355 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5mhnb" event={"ID":"01b1fe22-8cf8-47a3-8b11-fc245c8eed6b","Type":"ContainerDied","Data":"bc37c07bde37a849111a9d821ce0022bd6c2f38281081e2d015ebf2ef8708bfd"} Jan 29 12:38:18 crc kubenswrapper[4753]: I0129 12:38:18.318390 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5mhnb" event={"ID":"01b1fe22-8cf8-47a3-8b11-fc245c8eed6b","Type":"ContainerDied","Data":"de654295d97b1106968e6f73c021bba8c7936728cd5b28ebe9b27a8ead8b14b0"} Jan 29 12:38:18 crc kubenswrapper[4753]: I0129 12:38:18.318411 4753 scope.go:117] "RemoveContainer" containerID="bc37c07bde37a849111a9d821ce0022bd6c2f38281081e2d015ebf2ef8708bfd" Jan 29 12:38:18 crc kubenswrapper[4753]: I0129 12:38:18.318608 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5mhnb" Jan 29 12:38:18 crc kubenswrapper[4753]: I0129 12:38:18.341067 4753 scope.go:117] "RemoveContainer" containerID="481a3c7d2ac60bd3b90c161fb5079cdee2aae45823f2f6fec0bb7b27fe398831" Jan 29 12:38:18 crc kubenswrapper[4753]: I0129 12:38:18.343283 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-5mhnb"] Jan 29 12:38:18 crc kubenswrapper[4753]: I0129 12:38:18.351751 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-5mhnb"] Jan 29 12:38:18 crc kubenswrapper[4753]: I0129 12:38:18.361670 4753 scope.go:117] "RemoveContainer" containerID="fd9db5da531f48a8ff4062bb7f8d38fb1199bcdda6346c77b7564217a8dac899" Jan 29 12:38:18 crc kubenswrapper[4753]: I0129 12:38:18.380050 4753 scope.go:117] "RemoveContainer" containerID="bc37c07bde37a849111a9d821ce0022bd6c2f38281081e2d015ebf2ef8708bfd" Jan 29 12:38:18 crc kubenswrapper[4753]: E0129 12:38:18.380701 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bc37c07bde37a849111a9d821ce0022bd6c2f38281081e2d015ebf2ef8708bfd\": container with ID starting with bc37c07bde37a849111a9d821ce0022bd6c2f38281081e2d015ebf2ef8708bfd not found: ID does not exist" containerID="bc37c07bde37a849111a9d821ce0022bd6c2f38281081e2d015ebf2ef8708bfd" Jan 29 12:38:18 crc kubenswrapper[4753]: I0129 12:38:18.380744 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bc37c07bde37a849111a9d821ce0022bd6c2f38281081e2d015ebf2ef8708bfd"} err="failed to get container status \"bc37c07bde37a849111a9d821ce0022bd6c2f38281081e2d015ebf2ef8708bfd\": rpc error: code = NotFound desc = could not find container \"bc37c07bde37a849111a9d821ce0022bd6c2f38281081e2d015ebf2ef8708bfd\": container with ID starting with bc37c07bde37a849111a9d821ce0022bd6c2f38281081e2d015ebf2ef8708bfd not found: ID does not exist" Jan 29 12:38:18 crc kubenswrapper[4753]: I0129 12:38:18.380768 4753 scope.go:117] "RemoveContainer" containerID="481a3c7d2ac60bd3b90c161fb5079cdee2aae45823f2f6fec0bb7b27fe398831" Jan 29 12:38:18 crc kubenswrapper[4753]: E0129 12:38:18.381054 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"481a3c7d2ac60bd3b90c161fb5079cdee2aae45823f2f6fec0bb7b27fe398831\": container with ID starting with 481a3c7d2ac60bd3b90c161fb5079cdee2aae45823f2f6fec0bb7b27fe398831 not found: ID does not exist" containerID="481a3c7d2ac60bd3b90c161fb5079cdee2aae45823f2f6fec0bb7b27fe398831" Jan 29 12:38:18 crc kubenswrapper[4753]: I0129 12:38:18.381077 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"481a3c7d2ac60bd3b90c161fb5079cdee2aae45823f2f6fec0bb7b27fe398831"} err="failed to get container status \"481a3c7d2ac60bd3b90c161fb5079cdee2aae45823f2f6fec0bb7b27fe398831\": rpc error: code = NotFound desc = could not find container \"481a3c7d2ac60bd3b90c161fb5079cdee2aae45823f2f6fec0bb7b27fe398831\": container with ID starting with 481a3c7d2ac60bd3b90c161fb5079cdee2aae45823f2f6fec0bb7b27fe398831 not found: ID does not exist" Jan 29 12:38:18 crc kubenswrapper[4753]: I0129 12:38:18.381092 4753 scope.go:117] "RemoveContainer" containerID="fd9db5da531f48a8ff4062bb7f8d38fb1199bcdda6346c77b7564217a8dac899" Jan 29 12:38:18 crc kubenswrapper[4753]: E0129 12:38:18.381618 4753 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"fd9db5da531f48a8ff4062bb7f8d38fb1199bcdda6346c77b7564217a8dac899\": container with ID starting with fd9db5da531f48a8ff4062bb7f8d38fb1199bcdda6346c77b7564217a8dac899 not found: ID does not exist" containerID="fd9db5da531f48a8ff4062bb7f8d38fb1199bcdda6346c77b7564217a8dac899" Jan 29 12:38:18 crc kubenswrapper[4753]: I0129 12:38:18.381643 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fd9db5da531f48a8ff4062bb7f8d38fb1199bcdda6346c77b7564217a8dac899"} err="failed to get container status \"fd9db5da531f48a8ff4062bb7f8d38fb1199bcdda6346c77b7564217a8dac899\": rpc error: code = NotFound desc = could not find container \"fd9db5da531f48a8ff4062bb7f8d38fb1199bcdda6346c77b7564217a8dac899\": container with ID starting with fd9db5da531f48a8ff4062bb7f8d38fb1199bcdda6346c77b7564217a8dac899 not found: ID does not exist" Jan 29 12:38:19 crc kubenswrapper[4753]: I0129 12:38:19.898187 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01b1fe22-8cf8-47a3-8b11-fc245c8eed6b" path="/var/lib/kubelet/pods/01b1fe22-8cf8-47a3-8b11-fc245c8eed6b/volumes" Jan 29 12:38:33 crc kubenswrapper[4753]: I0129 12:38:33.613348 4753 scope.go:117] "RemoveContainer" containerID="23da8d2b8648a062ac86a655913b68baddecb5237d38120749304817382bdf11" Jan 29 12:38:33 crc kubenswrapper[4753]: I0129 12:38:33.656002 4753 scope.go:117] "RemoveContainer" containerID="afa17d1f3e7e3bc437a4bb1ab0243ed81cbe29753991167d1e4e278f45c29ee5" Jan 29 12:38:46 crc kubenswrapper[4753]: I0129 12:38:46.631056 4753 generic.go:334] "Generic (PLEG): container finished" podID="ea89829a-0669-4039-b610-18262b3eaf9f" containerID="91f8811a03688aed81d0a3e6f48cd444c9b726139a4773cb07b3a70aaacc5f71" exitCode=0 Jan 29 12:38:46 crc kubenswrapper[4753]: I0129 12:38:46.631270 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-mwvhx/must-gather-bb7qn" event={"ID":"ea89829a-0669-4039-b610-18262b3eaf9f","Type":"ContainerDied","Data":"91f8811a03688aed81d0a3e6f48cd444c9b726139a4773cb07b3a70aaacc5f71"} Jan 29 12:38:46 crc kubenswrapper[4753]: I0129 12:38:46.633629 4753 scope.go:117] "RemoveContainer" containerID="91f8811a03688aed81d0a3e6f48cd444c9b726139a4773cb07b3a70aaacc5f71" Jan 29 12:38:46 crc kubenswrapper[4753]: I0129 12:38:46.695693 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-mwvhx_must-gather-bb7qn_ea89829a-0669-4039-b610-18262b3eaf9f/gather/0.log" Jan 29 12:38:54 crc kubenswrapper[4753]: I0129 12:38:54.243158 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-mwvhx/must-gather-bb7qn"] Jan 29 12:38:54 crc kubenswrapper[4753]: I0129 12:38:54.244097 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-mwvhx/must-gather-bb7qn" podUID="ea89829a-0669-4039-b610-18262b3eaf9f" containerName="copy" containerID="cri-o://b3b6a2f7fb9ddbfd564af86f2ea3264eda7d98c9b0359fd02ad09c69cb9fade4" gracePeriod=2 Jan 29 12:38:54 crc kubenswrapper[4753]: I0129 12:38:54.286573 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-mwvhx/must-gather-bb7qn"] Jan 29 12:38:54 crc kubenswrapper[4753]: I0129 12:38:54.896833 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-mwvhx_must-gather-bb7qn_ea89829a-0669-4039-b610-18262b3eaf9f/copy/0.log" Jan 29 12:38:54 crc kubenswrapper[4753]: I0129 12:38:54.899355 4753 generic.go:334] "Generic (PLEG): 
container finished" podID="ea89829a-0669-4039-b610-18262b3eaf9f" containerID="b3b6a2f7fb9ddbfd564af86f2ea3264eda7d98c9b0359fd02ad09c69cb9fade4" exitCode=143 Jan 29 12:38:54 crc kubenswrapper[4753]: I0129 12:38:54.984593 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-mwvhx_must-gather-bb7qn_ea89829a-0669-4039-b610-18262b3eaf9f/copy/0.log" Jan 29 12:38:54 crc kubenswrapper[4753]: I0129 12:38:54.985648 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-mwvhx/must-gather-bb7qn" Jan 29 12:38:55 crc kubenswrapper[4753]: I0129 12:38:55.138044 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/ea89829a-0669-4039-b610-18262b3eaf9f-must-gather-output\") pod \"ea89829a-0669-4039-b610-18262b3eaf9f\" (UID: \"ea89829a-0669-4039-b610-18262b3eaf9f\") " Jan 29 12:38:55 crc kubenswrapper[4753]: I0129 12:38:55.138130 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kglpn\" (UniqueName: \"kubernetes.io/projected/ea89829a-0669-4039-b610-18262b3eaf9f-kube-api-access-kglpn\") pod \"ea89829a-0669-4039-b610-18262b3eaf9f\" (UID: \"ea89829a-0669-4039-b610-18262b3eaf9f\") " Jan 29 12:38:55 crc kubenswrapper[4753]: I0129 12:38:55.144466 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ea89829a-0669-4039-b610-18262b3eaf9f-kube-api-access-kglpn" (OuterVolumeSpecName: "kube-api-access-kglpn") pod "ea89829a-0669-4039-b610-18262b3eaf9f" (UID: "ea89829a-0669-4039-b610-18262b3eaf9f"). InnerVolumeSpecName "kube-api-access-kglpn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:38:55 crc kubenswrapper[4753]: I0129 12:38:55.212011 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ea89829a-0669-4039-b610-18262b3eaf9f-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "ea89829a-0669-4039-b610-18262b3eaf9f" (UID: "ea89829a-0669-4039-b610-18262b3eaf9f"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:38:55 crc kubenswrapper[4753]: I0129 12:38:55.239349 4753 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/ea89829a-0669-4039-b610-18262b3eaf9f-must-gather-output\") on node \"crc\" DevicePath \"\"" Jan 29 12:38:55 crc kubenswrapper[4753]: I0129 12:38:55.239380 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kglpn\" (UniqueName: \"kubernetes.io/projected/ea89829a-0669-4039-b610-18262b3eaf9f-kube-api-access-kglpn\") on node \"crc\" DevicePath \"\"" Jan 29 12:38:55 crc kubenswrapper[4753]: I0129 12:38:55.897762 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ea89829a-0669-4039-b610-18262b3eaf9f" path="/var/lib/kubelet/pods/ea89829a-0669-4039-b610-18262b3eaf9f/volumes" Jan 29 12:38:55 crc kubenswrapper[4753]: I0129 12:38:55.928144 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-mwvhx_must-gather-bb7qn_ea89829a-0669-4039-b610-18262b3eaf9f/copy/0.log" Jan 29 12:38:55 crc kubenswrapper[4753]: I0129 12:38:55.929452 4753 scope.go:117] "RemoveContainer" containerID="b3b6a2f7fb9ddbfd564af86f2ea3264eda7d98c9b0359fd02ad09c69cb9fade4" Jan 29 12:38:55 crc kubenswrapper[4753]: I0129 12:38:55.929526 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-mwvhx/must-gather-bb7qn" Jan 29 12:38:55 crc kubenswrapper[4753]: I0129 12:38:55.951915 4753 scope.go:117] "RemoveContainer" containerID="91f8811a03688aed81d0a3e6f48cd444c9b726139a4773cb07b3a70aaacc5f71" Jan 29 12:39:33 crc kubenswrapper[4753]: I0129 12:39:33.757832 4753 scope.go:117] "RemoveContainer" containerID="46f7c35ab86b89f8376627ab298147cb497e7949a367a37e03bef56228da0463" Jan 29 12:40:29 crc kubenswrapper[4753]: I0129 12:40:29.254312 4753 patch_prober.go:28] interesting pod/machine-config-daemon-7c24x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 12:40:29 crc kubenswrapper[4753]: I0129 12:40:29.255122 4753 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 12:40:33 crc kubenswrapper[4753]: I0129 12:40:33.823100 4753 scope.go:117] "RemoveContainer" containerID="c71356fa86b470c8f7e0cf6bcc6682c8b13a810d9eb2248d5468458188761f33" Jan 29 12:40:56 crc kubenswrapper[4753]: I0129 12:40:56.152992 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-x5fwk"] Jan 29 12:40:56 crc kubenswrapper[4753]: E0129 12:40:56.154172 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="01b1fe22-8cf8-47a3-8b11-fc245c8eed6b" containerName="extract-utilities" Jan 29 12:40:56 crc kubenswrapper[4753]: I0129 12:40:56.154216 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="01b1fe22-8cf8-47a3-8b11-fc245c8eed6b" containerName="extract-utilities" Jan 29 12:40:56 crc kubenswrapper[4753]: E0129 12:40:56.154266 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea89829a-0669-4039-b610-18262b3eaf9f" containerName="gather" Jan 29 12:40:56 crc kubenswrapper[4753]: I0129 12:40:56.154274 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea89829a-0669-4039-b610-18262b3eaf9f" containerName="gather" Jan 29 12:40:56 crc kubenswrapper[4753]: E0129 12:40:56.154300 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="01b1fe22-8cf8-47a3-8b11-fc245c8eed6b" containerName="registry-server" Jan 29 12:40:56 crc kubenswrapper[4753]: I0129 12:40:56.154308 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="01b1fe22-8cf8-47a3-8b11-fc245c8eed6b" containerName="registry-server" Jan 29 12:40:56 crc kubenswrapper[4753]: E0129 12:40:56.154319 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="01b1fe22-8cf8-47a3-8b11-fc245c8eed6b" containerName="extract-content" Jan 29 12:40:56 crc kubenswrapper[4753]: I0129 12:40:56.154326 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="01b1fe22-8cf8-47a3-8b11-fc245c8eed6b" containerName="extract-content" Jan 29 12:40:56 crc kubenswrapper[4753]: E0129 12:40:56.154341 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea89829a-0669-4039-b610-18262b3eaf9f" containerName="copy" Jan 29 12:40:56 crc kubenswrapper[4753]: I0129 12:40:56.154348 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea89829a-0669-4039-b610-18262b3eaf9f" containerName="copy" Jan 29 12:40:56 crc kubenswrapper[4753]: I0129 12:40:56.154602 4753 
memory_manager.go:354] "RemoveStaleState removing state" podUID="ea89829a-0669-4039-b610-18262b3eaf9f" containerName="gather" Jan 29 12:40:56 crc kubenswrapper[4753]: I0129 12:40:56.154626 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="ea89829a-0669-4039-b610-18262b3eaf9f" containerName="copy" Jan 29 12:40:56 crc kubenswrapper[4753]: I0129 12:40:56.154651 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="01b1fe22-8cf8-47a3-8b11-fc245c8eed6b" containerName="registry-server" Jan 29 12:40:56 crc kubenswrapper[4753]: I0129 12:40:56.155896 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-x5fwk" Jan 29 12:40:56 crc kubenswrapper[4753]: I0129 12:40:56.161399 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-x5fwk"] Jan 29 12:40:56 crc kubenswrapper[4753]: I0129 12:40:56.283053 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d009e07a-706d-46c6-b0b8-938f5f3e099c-catalog-content\") pod \"redhat-operators-x5fwk\" (UID: \"d009e07a-706d-46c6-b0b8-938f5f3e099c\") " pod="openshift-marketplace/redhat-operators-x5fwk" Jan 29 12:40:56 crc kubenswrapper[4753]: I0129 12:40:56.283139 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8gshz\" (UniqueName: \"kubernetes.io/projected/d009e07a-706d-46c6-b0b8-938f5f3e099c-kube-api-access-8gshz\") pod \"redhat-operators-x5fwk\" (UID: \"d009e07a-706d-46c6-b0b8-938f5f3e099c\") " pod="openshift-marketplace/redhat-operators-x5fwk" Jan 29 12:40:56 crc kubenswrapper[4753]: I0129 12:40:56.283174 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d009e07a-706d-46c6-b0b8-938f5f3e099c-utilities\") pod \"redhat-operators-x5fwk\" (UID: \"d009e07a-706d-46c6-b0b8-938f5f3e099c\") " pod="openshift-marketplace/redhat-operators-x5fwk" Jan 29 12:40:56 crc kubenswrapper[4753]: I0129 12:40:56.405860 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d009e07a-706d-46c6-b0b8-938f5f3e099c-catalog-content\") pod \"redhat-operators-x5fwk\" (UID: \"d009e07a-706d-46c6-b0b8-938f5f3e099c\") " pod="openshift-marketplace/redhat-operators-x5fwk" Jan 29 12:40:56 crc kubenswrapper[4753]: I0129 12:40:56.406045 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8gshz\" (UniqueName: \"kubernetes.io/projected/d009e07a-706d-46c6-b0b8-938f5f3e099c-kube-api-access-8gshz\") pod \"redhat-operators-x5fwk\" (UID: \"d009e07a-706d-46c6-b0b8-938f5f3e099c\") " pod="openshift-marketplace/redhat-operators-x5fwk" Jan 29 12:40:56 crc kubenswrapper[4753]: I0129 12:40:56.406097 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d009e07a-706d-46c6-b0b8-938f5f3e099c-utilities\") pod \"redhat-operators-x5fwk\" (UID: \"d009e07a-706d-46c6-b0b8-938f5f3e099c\") " pod="openshift-marketplace/redhat-operators-x5fwk" Jan 29 12:40:56 crc kubenswrapper[4753]: I0129 12:40:56.407154 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d009e07a-706d-46c6-b0b8-938f5f3e099c-utilities\") pod \"redhat-operators-x5fwk\" 
(UID: \"d009e07a-706d-46c6-b0b8-938f5f3e099c\") " pod="openshift-marketplace/redhat-operators-x5fwk" Jan 29 12:40:56 crc kubenswrapper[4753]: I0129 12:40:56.407279 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d009e07a-706d-46c6-b0b8-938f5f3e099c-catalog-content\") pod \"redhat-operators-x5fwk\" (UID: \"d009e07a-706d-46c6-b0b8-938f5f3e099c\") " pod="openshift-marketplace/redhat-operators-x5fwk" Jan 29 12:40:56 crc kubenswrapper[4753]: I0129 12:40:56.451529 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8gshz\" (UniqueName: \"kubernetes.io/projected/d009e07a-706d-46c6-b0b8-938f5f3e099c-kube-api-access-8gshz\") pod \"redhat-operators-x5fwk\" (UID: \"d009e07a-706d-46c6-b0b8-938f5f3e099c\") " pod="openshift-marketplace/redhat-operators-x5fwk" Jan 29 12:40:56 crc kubenswrapper[4753]: I0129 12:40:56.482082 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-x5fwk" Jan 29 12:40:57 crc kubenswrapper[4753]: I0129 12:40:57.101885 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-x5fwk"] Jan 29 12:40:57 crc kubenswrapper[4753]: I0129 12:40:57.239772 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-x5fwk" event={"ID":"d009e07a-706d-46c6-b0b8-938f5f3e099c","Type":"ContainerStarted","Data":"1a59a2502a941d6ae3a15f5961627991d0403f4f0e0073a87775264ab6033077"} Jan 29 12:40:58 crc kubenswrapper[4753]: I0129 12:40:58.247515 4753 generic.go:334] "Generic (PLEG): container finished" podID="d009e07a-706d-46c6-b0b8-938f5f3e099c" containerID="1c6017db0af2578f0bcff3b30a555048ad029ffac7047246799835e1983bf478" exitCode=0 Jan 29 12:40:58 crc kubenswrapper[4753]: I0129 12:40:58.247833 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-x5fwk" event={"ID":"d009e07a-706d-46c6-b0b8-938f5f3e099c","Type":"ContainerDied","Data":"1c6017db0af2578f0bcff3b30a555048ad029ffac7047246799835e1983bf478"} Jan 29 12:40:59 crc kubenswrapper[4753]: I0129 12:40:59.253315 4753 patch_prober.go:28] interesting pod/machine-config-daemon-7c24x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 12:40:59 crc kubenswrapper[4753]: I0129 12:40:59.254099 4753 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 12:41:01 crc kubenswrapper[4753]: I0129 12:41:01.268650 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-x5fwk" event={"ID":"d009e07a-706d-46c6-b0b8-938f5f3e099c","Type":"ContainerStarted","Data":"de484d568180f8d6aa2eef951f7ba9b17e20ead0fd4721283edaf2b93753315c"} Jan 29 12:41:03 crc kubenswrapper[4753]: I0129 12:41:03.281740 4753 generic.go:334] "Generic (PLEG): container finished" podID="d009e07a-706d-46c6-b0b8-938f5f3e099c" containerID="de484d568180f8d6aa2eef951f7ba9b17e20ead0fd4721283edaf2b93753315c" exitCode=0 Jan 29 12:41:03 crc kubenswrapper[4753]: I0129 12:41:03.281799 4753 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openshift-marketplace/redhat-operators-x5fwk" event={"ID":"d009e07a-706d-46c6-b0b8-938f5f3e099c","Type":"ContainerDied","Data":"de484d568180f8d6aa2eef951f7ba9b17e20ead0fd4721283edaf2b93753315c"} Jan 29 12:41:06 crc kubenswrapper[4753]: I0129 12:41:06.303435 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-x5fwk" event={"ID":"d009e07a-706d-46c6-b0b8-938f5f3e099c","Type":"ContainerStarted","Data":"75dd22167ea32da2a961d9d4e94e82462ec13f5ca4396645138c9f5fbf620fa0"} Jan 29 12:41:06 crc kubenswrapper[4753]: I0129 12:41:06.421869 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-x5fwk" podStartSLOduration=3.088649426 podStartE2EDuration="10.421832925s" podCreationTimestamp="2026-01-29 12:40:56 +0000 UTC" firstStartedPulling="2026-01-29 12:40:58.249122704 +0000 UTC m=+2072.501204159" lastFinishedPulling="2026-01-29 12:41:05.582306203 +0000 UTC m=+2079.834387658" observedRunningTime="2026-01-29 12:41:06.421772243 +0000 UTC m=+2080.673853688" watchObservedRunningTime="2026-01-29 12:41:06.421832925 +0000 UTC m=+2080.673914380" Jan 29 12:41:06 crc kubenswrapper[4753]: I0129 12:41:06.491101 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-x5fwk" Jan 29 12:41:06 crc kubenswrapper[4753]: I0129 12:41:06.491167 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-x5fwk" Jan 29 12:41:07 crc kubenswrapper[4753]: I0129 12:41:07.532922 4753 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-x5fwk" podUID="d009e07a-706d-46c6-b0b8-938f5f3e099c" containerName="registry-server" probeResult="failure" output=< Jan 29 12:41:07 crc kubenswrapper[4753]: timeout: failed to connect service ":50051" within 1s Jan 29 12:41:07 crc kubenswrapper[4753]: > Jan 29 12:41:16 crc kubenswrapper[4753]: I0129 12:41:16.527433 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-x5fwk" Jan 29 12:41:16 crc kubenswrapper[4753]: I0129 12:41:16.572352 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-x5fwk" Jan 29 12:41:16 crc kubenswrapper[4753]: I0129 12:41:16.763883 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-x5fwk"] Jan 29 12:41:17 crc kubenswrapper[4753]: I0129 12:41:17.826340 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-x5fwk" podUID="d009e07a-706d-46c6-b0b8-938f5f3e099c" containerName="registry-server" containerID="cri-o://75dd22167ea32da2a961d9d4e94e82462ec13f5ca4396645138c9f5fbf620fa0" gracePeriod=2 Jan 29 12:41:18 crc kubenswrapper[4753]: I0129 12:41:18.835033 4753 generic.go:334] "Generic (PLEG): container finished" podID="d009e07a-706d-46c6-b0b8-938f5f3e099c" containerID="75dd22167ea32da2a961d9d4e94e82462ec13f5ca4396645138c9f5fbf620fa0" exitCode=0 Jan 29 12:41:18 crc kubenswrapper[4753]: I0129 12:41:18.835084 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-x5fwk" event={"ID":"d009e07a-706d-46c6-b0b8-938f5f3e099c","Type":"ContainerDied","Data":"75dd22167ea32da2a961d9d4e94e82462ec13f5ca4396645138c9f5fbf620fa0"} Jan 29 12:41:19 crc kubenswrapper[4753]: I0129 12:41:19.010082 4753 util.go:48] "No ready sandbox 
Jan 29 12:41:19 crc kubenswrapper[4753]: I0129 12:41:19.038272 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d009e07a-706d-46c6-b0b8-938f5f3e099c-utilities\") pod \"d009e07a-706d-46c6-b0b8-938f5f3e099c\" (UID: \"d009e07a-706d-46c6-b0b8-938f5f3e099c\") "
Jan 29 12:41:19 crc kubenswrapper[4753]: I0129 12:41:19.038317 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d009e07a-706d-46c6-b0b8-938f5f3e099c-catalog-content\") pod \"d009e07a-706d-46c6-b0b8-938f5f3e099c\" (UID: \"d009e07a-706d-46c6-b0b8-938f5f3e099c\") "
Jan 29 12:41:19 crc kubenswrapper[4753]: I0129 12:41:19.038366 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8gshz\" (UniqueName: \"kubernetes.io/projected/d009e07a-706d-46c6-b0b8-938f5f3e099c-kube-api-access-8gshz\") pod \"d009e07a-706d-46c6-b0b8-938f5f3e099c\" (UID: \"d009e07a-706d-46c6-b0b8-938f5f3e099c\") "
Jan 29 12:41:19 crc kubenswrapper[4753]: I0129 12:41:19.321005 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d009e07a-706d-46c6-b0b8-938f5f3e099c-utilities" (OuterVolumeSpecName: "utilities") pod "d009e07a-706d-46c6-b0b8-938f5f3e099c" (UID: "d009e07a-706d-46c6-b0b8-938f5f3e099c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 12:41:19 crc kubenswrapper[4753]: I0129 12:41:19.330290 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d009e07a-706d-46c6-b0b8-938f5f3e099c-kube-api-access-8gshz" (OuterVolumeSpecName: "kube-api-access-8gshz") pod "d009e07a-706d-46c6-b0b8-938f5f3e099c" (UID: "d009e07a-706d-46c6-b0b8-938f5f3e099c"). InnerVolumeSpecName "kube-api-access-8gshz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 12:41:19 crc kubenswrapper[4753]: I0129 12:41:19.415577 4753 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d009e07a-706d-46c6-b0b8-938f5f3e099c-utilities\") on node \"crc\" DevicePath \"\""
Jan 29 12:41:19 crc kubenswrapper[4753]: I0129 12:41:19.415606 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8gshz\" (UniqueName: \"kubernetes.io/projected/d009e07a-706d-46c6-b0b8-938f5f3e099c-kube-api-access-8gshz\") on node \"crc\" DevicePath \"\""
Jan 29 12:41:19 crc kubenswrapper[4753]: I0129 12:41:19.436379 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d009e07a-706d-46c6-b0b8-938f5f3e099c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d009e07a-706d-46c6-b0b8-938f5f3e099c" (UID: "d009e07a-706d-46c6-b0b8-938f5f3e099c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 12:41:19 crc kubenswrapper[4753]: I0129 12:41:19.517224 4753 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d009e07a-706d-46c6-b0b8-938f5f3e099c-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 29 12:41:19 crc kubenswrapper[4753]: I0129 12:41:19.842751 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-x5fwk" event={"ID":"d009e07a-706d-46c6-b0b8-938f5f3e099c","Type":"ContainerDied","Data":"1a59a2502a941d6ae3a15f5961627991d0403f4f0e0073a87775264ab6033077"}
Jan 29 12:41:19 crc kubenswrapper[4753]: I0129 12:41:19.842845 4753 scope.go:117] "RemoveContainer" containerID="75dd22167ea32da2a961d9d4e94e82462ec13f5ca4396645138c9f5fbf620fa0"
Jan 29 12:41:19 crc kubenswrapper[4753]: I0129 12:41:19.842888 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-x5fwk"
Jan 29 12:41:19 crc kubenswrapper[4753]: I0129 12:41:19.862670 4753 scope.go:117] "RemoveContainer" containerID="de484d568180f8d6aa2eef951f7ba9b17e20ead0fd4721283edaf2b93753315c"
Jan 29 12:41:19 crc kubenswrapper[4753]: I0129 12:41:19.880354 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-x5fwk"]
Jan 29 12:41:19 crc kubenswrapper[4753]: I0129 12:41:19.885334 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-x5fwk"]
Jan 29 12:41:19 crc kubenswrapper[4753]: I0129 12:41:19.886654 4753 scope.go:117] "RemoveContainer" containerID="1c6017db0af2578f0bcff3b30a555048ad029ffac7047246799835e1983bf478"
Jan 29 12:41:20 crc kubenswrapper[4753]: I0129 12:41:19.898943 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d009e07a-706d-46c6-b0b8-938f5f3e099c" path="/var/lib/kubelet/pods/d009e07a-706d-46c6-b0b8-938f5f3e099c/volumes"
Jan 29 12:41:29 crc kubenswrapper[4753]: I0129 12:41:29.252746 4753 patch_prober.go:28] interesting pod/machine-config-daemon-7c24x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 12:41:29 crc kubenswrapper[4753]: I0129 12:41:29.254323 4753 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 12:41:29 crc kubenswrapper[4753]: I0129 12:41:29.254469 4753 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-7c24x"
Jan 29 12:41:29 crc kubenswrapper[4753]: I0129 12:41:29.255207 4753 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"68d8fb44353981db7ed428c16c1be1006209ecd529ce7dc45390bb208bf8ee08"} pod="openshift-machine-config-operator/machine-config-daemon-7c24x" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 29 12:41:29 crc kubenswrapper[4753]: I0129 12:41:29.255392 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" containerName="machine-config-daemon" containerID="cri-o://68d8fb44353981db7ed428c16c1be1006209ecd529ce7dc45390bb208bf8ee08" gracePeriod=600
pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" containerName="machine-config-daemon" containerID="cri-o://68d8fb44353981db7ed428c16c1be1006209ecd529ce7dc45390bb208bf8ee08" gracePeriod=600 Jan 29 12:41:29 crc kubenswrapper[4753]: I0129 12:41:29.922176 4753 generic.go:334] "Generic (PLEG): container finished" podID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" containerID="68d8fb44353981db7ed428c16c1be1006209ecd529ce7dc45390bb208bf8ee08" exitCode=0 Jan 29 12:41:29 crc kubenswrapper[4753]: I0129 12:41:29.922275 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" event={"ID":"b0310995-a7c7-47c3-ae6c-05daaaba92a6","Type":"ContainerDied","Data":"68d8fb44353981db7ed428c16c1be1006209ecd529ce7dc45390bb208bf8ee08"} Jan 29 12:41:29 crc kubenswrapper[4753]: I0129 12:41:29.922736 4753 scope.go:117] "RemoveContainer" containerID="00211d2562b6632acb04decda9045cbb8d3216adca69cd5bc1380ec42b8211e0" Jan 29 12:41:30 crc kubenswrapper[4753]: I0129 12:41:30.930937 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" event={"ID":"b0310995-a7c7-47c3-ae6c-05daaaba92a6","Type":"ContainerStarted","Data":"c9612c284467c21e6e66f796daa879bac36be707c5a1e8ee913fb0adc13ec2b7"} Jan 29 12:41:36 crc kubenswrapper[4753]: I0129 12:41:36.841268 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-6dx95/must-gather-8h8qb"] Jan 29 12:41:36 crc kubenswrapper[4753]: E0129 12:41:36.842097 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d009e07a-706d-46c6-b0b8-938f5f3e099c" containerName="extract-utilities" Jan 29 12:41:36 crc kubenswrapper[4753]: I0129 12:41:36.842125 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="d009e07a-706d-46c6-b0b8-938f5f3e099c" containerName="extract-utilities" Jan 29 12:41:36 crc kubenswrapper[4753]: E0129 12:41:36.842137 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d009e07a-706d-46c6-b0b8-938f5f3e099c" containerName="registry-server" Jan 29 12:41:36 crc kubenswrapper[4753]: I0129 12:41:36.842143 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="d009e07a-706d-46c6-b0b8-938f5f3e099c" containerName="registry-server" Jan 29 12:41:36 crc kubenswrapper[4753]: E0129 12:41:36.842160 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d009e07a-706d-46c6-b0b8-938f5f3e099c" containerName="extract-content" Jan 29 12:41:36 crc kubenswrapper[4753]: I0129 12:41:36.842167 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="d009e07a-706d-46c6-b0b8-938f5f3e099c" containerName="extract-content" Jan 29 12:41:36 crc kubenswrapper[4753]: I0129 12:41:36.842345 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="d009e07a-706d-46c6-b0b8-938f5f3e099c" containerName="registry-server" Jan 29 12:41:36 crc kubenswrapper[4753]: I0129 12:41:36.843609 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-6dx95/must-gather-8h8qb" Jan 29 12:41:36 crc kubenswrapper[4753]: I0129 12:41:36.851098 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-6dx95"/"openshift-service-ca.crt" Jan 29 12:41:36 crc kubenswrapper[4753]: I0129 12:41:36.860257 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-6dx95"/"kube-root-ca.crt" Jan 29 12:41:36 crc kubenswrapper[4753]: I0129 12:41:36.872329 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-6dx95"/"default-dockercfg-d4stq" Jan 29 12:41:36 crc kubenswrapper[4753]: I0129 12:41:36.880689 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-6dx95/must-gather-8h8qb"] Jan 29 12:41:36 crc kubenswrapper[4753]: I0129 12:41:36.935380 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qzjwd\" (UniqueName: \"kubernetes.io/projected/e66361cf-7d08-49f4-9571-3450b52d2f56-kube-api-access-qzjwd\") pod \"must-gather-8h8qb\" (UID: \"e66361cf-7d08-49f4-9571-3450b52d2f56\") " pod="openshift-must-gather-6dx95/must-gather-8h8qb" Jan 29 12:41:36 crc kubenswrapper[4753]: I0129 12:41:36.935523 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/e66361cf-7d08-49f4-9571-3450b52d2f56-must-gather-output\") pod \"must-gather-8h8qb\" (UID: \"e66361cf-7d08-49f4-9571-3450b52d2f56\") " pod="openshift-must-gather-6dx95/must-gather-8h8qb" Jan 29 12:41:37 crc kubenswrapper[4753]: I0129 12:41:37.036721 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/e66361cf-7d08-49f4-9571-3450b52d2f56-must-gather-output\") pod \"must-gather-8h8qb\" (UID: \"e66361cf-7d08-49f4-9571-3450b52d2f56\") " pod="openshift-must-gather-6dx95/must-gather-8h8qb" Jan 29 12:41:37 crc kubenswrapper[4753]: I0129 12:41:37.036814 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qzjwd\" (UniqueName: \"kubernetes.io/projected/e66361cf-7d08-49f4-9571-3450b52d2f56-kube-api-access-qzjwd\") pod \"must-gather-8h8qb\" (UID: \"e66361cf-7d08-49f4-9571-3450b52d2f56\") " pod="openshift-must-gather-6dx95/must-gather-8h8qb" Jan 29 12:41:37 crc kubenswrapper[4753]: I0129 12:41:37.037301 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/e66361cf-7d08-49f4-9571-3450b52d2f56-must-gather-output\") pod \"must-gather-8h8qb\" (UID: \"e66361cf-7d08-49f4-9571-3450b52d2f56\") " pod="openshift-must-gather-6dx95/must-gather-8h8qb" Jan 29 12:41:37 crc kubenswrapper[4753]: I0129 12:41:37.061603 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qzjwd\" (UniqueName: \"kubernetes.io/projected/e66361cf-7d08-49f4-9571-3450b52d2f56-kube-api-access-qzjwd\") pod \"must-gather-8h8qb\" (UID: \"e66361cf-7d08-49f4-9571-3450b52d2f56\") " pod="openshift-must-gather-6dx95/must-gather-8h8qb" Jan 29 12:41:37 crc kubenswrapper[4753]: I0129 12:41:37.160200 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-6dx95/must-gather-8h8qb" Jan 29 12:41:37 crc kubenswrapper[4753]: I0129 12:41:37.470015 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-6dx95/must-gather-8h8qb"] Jan 29 12:41:37 crc kubenswrapper[4753]: I0129 12:41:37.980878 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6dx95/must-gather-8h8qb" event={"ID":"e66361cf-7d08-49f4-9571-3450b52d2f56","Type":"ContainerStarted","Data":"c2ee86834fad792bbb75963f60575a63b7ac806178d88924b1dde2e11d2c94db"} Jan 29 12:41:38 crc kubenswrapper[4753]: I0129 12:41:38.989772 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6dx95/must-gather-8h8qb" event={"ID":"e66361cf-7d08-49f4-9571-3450b52d2f56","Type":"ContainerStarted","Data":"312360de953fbe1c2c4681655fb80e978370d960b8473057aa55ae1d4ed06b31"} Jan 29 12:41:38 crc kubenswrapper[4753]: I0129 12:41:38.989852 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6dx95/must-gather-8h8qb" event={"ID":"e66361cf-7d08-49f4-9571-3450b52d2f56","Type":"ContainerStarted","Data":"1c1735a2355ec096e143310a1896f58b5c8c4311180ba57e66dbfd510f5b722c"} Jan 29 12:41:39 crc kubenswrapper[4753]: I0129 12:41:39.016563 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-6dx95/must-gather-8h8qb" podStartSLOduration=3.016535186 podStartE2EDuration="3.016535186s" podCreationTimestamp="2026-01-29 12:41:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 12:41:39.004745996 +0000 UTC m=+2113.256827451" watchObservedRunningTime="2026-01-29 12:41:39.016535186 +0000 UTC m=+2113.268616641" Jan 29 12:42:33 crc kubenswrapper[4753]: I0129 12:42:33.320181 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-8dvz9_8fd9a883-0e5e-424f-bc31-199f2103c548/control-plane-machine-set-operator/0.log" Jan 29 12:42:33 crc kubenswrapper[4753]: I0129 12:42:33.711552 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-s4nq8_2d66d4a7-6db6-438c-9d2b-cb2bb0ef846b/kube-rbac-proxy/0.log" Jan 29 12:42:33 crc kubenswrapper[4753]: I0129 12:42:33.806502 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-s4nq8_2d66d4a7-6db6-438c-9d2b-cb2bb0ef846b/machine-api-operator/0.log" Jan 29 12:43:04 crc kubenswrapper[4753]: I0129 12:43:04.248396 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-n9w5v_3135e3f4-69fe-446f-9778-3e77d2b07dbf/kube-rbac-proxy/0.log" Jan 29 12:43:04 crc kubenswrapper[4753]: I0129 12:43:04.331640 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-n9w5v_3135e3f4-69fe-446f-9778-3e77d2b07dbf/controller/0.log" Jan 29 12:43:04 crc kubenswrapper[4753]: I0129 12:43:04.685755 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7df86c4f6c-g8vcz_e87775ff-faac-474c-814f-6873998fc276/frr-k8s-webhook-server/0.log" Jan 29 12:43:04 crc kubenswrapper[4753]: I0129 12:43:04.868736 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-z5drz_4b689b8d-ccc2-49e6-a449-c77e354f5f86/cp-frr-files/0.log" Jan 29 12:43:05 crc kubenswrapper[4753]: I0129 12:43:05.083324 4753 
log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-z5drz_4b689b8d-ccc2-49e6-a449-c77e354f5f86/cp-reloader/0.log" Jan 29 12:43:05 crc kubenswrapper[4753]: I0129 12:43:05.099012 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-z5drz_4b689b8d-ccc2-49e6-a449-c77e354f5f86/cp-metrics/0.log" Jan 29 12:43:05 crc kubenswrapper[4753]: I0129 12:43:05.123559 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-z5drz_4b689b8d-ccc2-49e6-a449-c77e354f5f86/cp-frr-files/0.log" Jan 29 12:43:05 crc kubenswrapper[4753]: I0129 12:43:05.146625 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-z5drz_4b689b8d-ccc2-49e6-a449-c77e354f5f86/cp-reloader/0.log" Jan 29 12:43:05 crc kubenswrapper[4753]: I0129 12:43:05.421181 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-z5drz_4b689b8d-ccc2-49e6-a449-c77e354f5f86/cp-frr-files/0.log" Jan 29 12:43:05 crc kubenswrapper[4753]: I0129 12:43:05.423879 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-z5drz_4b689b8d-ccc2-49e6-a449-c77e354f5f86/cp-reloader/0.log" Jan 29 12:43:05 crc kubenswrapper[4753]: I0129 12:43:05.449275 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-z5drz_4b689b8d-ccc2-49e6-a449-c77e354f5f86/cp-metrics/0.log" Jan 29 12:43:05 crc kubenswrapper[4753]: I0129 12:43:05.637620 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-z5drz_4b689b8d-ccc2-49e6-a449-c77e354f5f86/cp-metrics/0.log" Jan 29 12:43:06 crc kubenswrapper[4753]: I0129 12:43:06.097612 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-z5drz_4b689b8d-ccc2-49e6-a449-c77e354f5f86/cp-reloader/0.log" Jan 29 12:43:06 crc kubenswrapper[4753]: I0129 12:43:06.112790 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-z5drz_4b689b8d-ccc2-49e6-a449-c77e354f5f86/cp-frr-files/0.log" Jan 29 12:43:06 crc kubenswrapper[4753]: I0129 12:43:06.123094 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-z5drz_4b689b8d-ccc2-49e6-a449-c77e354f5f86/cp-metrics/0.log" Jan 29 12:43:06 crc kubenswrapper[4753]: I0129 12:43:06.130048 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-z5drz_4b689b8d-ccc2-49e6-a449-c77e354f5f86/controller/0.log" Jan 29 12:43:06 crc kubenswrapper[4753]: I0129 12:43:06.265645 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-z5drz_4b689b8d-ccc2-49e6-a449-c77e354f5f86/frr-metrics/0.log" Jan 29 12:43:06 crc kubenswrapper[4753]: I0129 12:43:06.312377 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-z5drz_4b689b8d-ccc2-49e6-a449-c77e354f5f86/kube-rbac-proxy/0.log" Jan 29 12:43:06 crc kubenswrapper[4753]: I0129 12:43:06.498689 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-z5drz_4b689b8d-ccc2-49e6-a449-c77e354f5f86/kube-rbac-proxy-frr/0.log" Jan 29 12:43:06 crc kubenswrapper[4753]: I0129 12:43:06.608175 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-z5drz_4b689b8d-ccc2-49e6-a449-c77e354f5f86/reloader/0.log" Jan 29 12:43:06 crc kubenswrapper[4753]: I0129 12:43:06.708977 4753 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_metallb-operator-controller-manager-68df96bfd5-7k5hh_3695d573-e0e3-469a-b536-6ed32ade8e82/manager/0.log" Jan 29 12:43:06 crc kubenswrapper[4753]: I0129 12:43:06.904601 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-6787488fdd-zv44d_f743a92e-c395-4123-b0ed-58237f1b1c8f/webhook-server/0.log" Jan 29 12:43:07 crc kubenswrapper[4753]: I0129 12:43:07.006265 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-5fz59_f59f410e-2d13-4bbc-aca6-c50536d77905/kube-rbac-proxy/0.log" Jan 29 12:43:07 crc kubenswrapper[4753]: I0129 12:43:07.198178 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-z5drz_4b689b8d-ccc2-49e6-a449-c77e354f5f86/frr/0.log" Jan 29 12:43:07 crc kubenswrapper[4753]: I0129 12:43:07.384760 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-5fz59_f59f410e-2d13-4bbc-aca6-c50536d77905/speaker/0.log" Jan 29 12:43:31 crc kubenswrapper[4753]: I0129 12:43:31.475487 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7b4vm_01efdd65-431c-4f40-b00f-00cc75ed4682/util/0.log" Jan 29 12:43:31 crc kubenswrapper[4753]: I0129 12:43:31.700141 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7b4vm_01efdd65-431c-4f40-b00f-00cc75ed4682/pull/0.log" Jan 29 12:43:31 crc kubenswrapper[4753]: I0129 12:43:31.705983 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7b4vm_01efdd65-431c-4f40-b00f-00cc75ed4682/util/0.log" Jan 29 12:43:31 crc kubenswrapper[4753]: I0129 12:43:31.706059 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7b4vm_01efdd65-431c-4f40-b00f-00cc75ed4682/pull/0.log" Jan 29 12:43:31 crc kubenswrapper[4753]: I0129 12:43:31.888666 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7b4vm_01efdd65-431c-4f40-b00f-00cc75ed4682/util/0.log" Jan 29 12:43:31 crc kubenswrapper[4753]: I0129 12:43:31.913935 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7b4vm_01efdd65-431c-4f40-b00f-00cc75ed4682/pull/0.log" Jan 29 12:43:31 crc kubenswrapper[4753]: I0129 12:43:31.918047 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dc7b4vm_01efdd65-431c-4f40-b00f-00cc75ed4682/extract/0.log" Jan 29 12:43:32 crc kubenswrapper[4753]: I0129 12:43:32.076956 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-892wz_30b357a9-c353-4290-92a0-cbec35e161a0/extract-utilities/0.log" Jan 29 12:43:32 crc kubenswrapper[4753]: I0129 12:43:32.298827 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-892wz_30b357a9-c353-4290-92a0-cbec35e161a0/extract-utilities/0.log" Jan 29 12:43:32 crc kubenswrapper[4753]: I0129 12:43:32.304861 4753 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_certified-operators-892wz_30b357a9-c353-4290-92a0-cbec35e161a0/extract-content/0.log" Jan 29 12:43:32 crc kubenswrapper[4753]: I0129 12:43:32.334597 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-892wz_30b357a9-c353-4290-92a0-cbec35e161a0/extract-content/0.log" Jan 29 12:43:32 crc kubenswrapper[4753]: I0129 12:43:32.488174 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-892wz_30b357a9-c353-4290-92a0-cbec35e161a0/extract-utilities/0.log" Jan 29 12:43:32 crc kubenswrapper[4753]: I0129 12:43:32.528551 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-892wz_30b357a9-c353-4290-92a0-cbec35e161a0/extract-content/0.log" Jan 29 12:43:32 crc kubenswrapper[4753]: I0129 12:43:32.686424 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-zscj5_1008e065-fc17-492d-8775-ced31b31aa22/extract-utilities/0.log" Jan 29 12:43:32 crc kubenswrapper[4753]: I0129 12:43:32.931885 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-zscj5_1008e065-fc17-492d-8775-ced31b31aa22/extract-content/0.log" Jan 29 12:43:32 crc kubenswrapper[4753]: I0129 12:43:32.950630 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-zscj5_1008e065-fc17-492d-8775-ced31b31aa22/extract-content/0.log" Jan 29 12:43:32 crc kubenswrapper[4753]: I0129 12:43:32.984050 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-zscj5_1008e065-fc17-492d-8775-ced31b31aa22/extract-utilities/0.log" Jan 29 12:43:33 crc kubenswrapper[4753]: I0129 12:43:33.036202 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-892wz_30b357a9-c353-4290-92a0-cbec35e161a0/registry-server/0.log" Jan 29 12:43:33 crc kubenswrapper[4753]: I0129 12:43:33.107602 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-zscj5_1008e065-fc17-492d-8775-ced31b31aa22/extract-utilities/0.log" Jan 29 12:43:33 crc kubenswrapper[4753]: I0129 12:43:33.127808 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-zscj5_1008e065-fc17-492d-8775-ced31b31aa22/extract-content/0.log" Jan 29 12:43:33 crc kubenswrapper[4753]: I0129 12:43:33.307183 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-kmlbh_8c12a149-f214-43c8-a40d-57c615cbe69e/marketplace-operator/0.log" Jan 29 12:43:33 crc kubenswrapper[4753]: I0129 12:43:33.485496 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-52zgz_ea63566b-9a3b-4a69-aaba-18e93b24f5c3/extract-utilities/0.log" Jan 29 12:43:33 crc kubenswrapper[4753]: I0129 12:43:33.670107 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-52zgz_ea63566b-9a3b-4a69-aaba-18e93b24f5c3/extract-utilities/0.log" Jan 29 12:43:33 crc kubenswrapper[4753]: I0129 12:43:33.728085 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-52zgz_ea63566b-9a3b-4a69-aaba-18e93b24f5c3/extract-content/0.log" Jan 29 12:43:33 crc kubenswrapper[4753]: I0129 12:43:33.763490 4753 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_redhat-marketplace-52zgz_ea63566b-9a3b-4a69-aaba-18e93b24f5c3/extract-content/0.log" Jan 29 12:43:33 crc kubenswrapper[4753]: I0129 12:43:33.772600 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-zscj5_1008e065-fc17-492d-8775-ced31b31aa22/registry-server/0.log" Jan 29 12:43:33 crc kubenswrapper[4753]: I0129 12:43:33.913559 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-52zgz_ea63566b-9a3b-4a69-aaba-18e93b24f5c3/extract-content/0.log" Jan 29 12:43:33 crc kubenswrapper[4753]: I0129 12:43:33.913758 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-52zgz_ea63566b-9a3b-4a69-aaba-18e93b24f5c3/extract-utilities/0.log" Jan 29 12:43:34 crc kubenswrapper[4753]: I0129 12:43:34.096689 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-52zgz_ea63566b-9a3b-4a69-aaba-18e93b24f5c3/registry-server/0.log" Jan 29 12:43:34 crc kubenswrapper[4753]: I0129 12:43:34.136880 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-5qfcb_4b6c93ff-5f67-421a-beca-c1b588d8535d/extract-utilities/0.log" Jan 29 12:43:34 crc kubenswrapper[4753]: I0129 12:43:34.280142 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-5qfcb_4b6c93ff-5f67-421a-beca-c1b588d8535d/extract-content/0.log" Jan 29 12:43:34 crc kubenswrapper[4753]: I0129 12:43:34.302830 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-5qfcb_4b6c93ff-5f67-421a-beca-c1b588d8535d/extract-utilities/0.log" Jan 29 12:43:34 crc kubenswrapper[4753]: I0129 12:43:34.309900 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-5qfcb_4b6c93ff-5f67-421a-beca-c1b588d8535d/extract-content/0.log" Jan 29 12:43:34 crc kubenswrapper[4753]: I0129 12:43:34.457450 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-5qfcb_4b6c93ff-5f67-421a-beca-c1b588d8535d/extract-utilities/0.log" Jan 29 12:43:34 crc kubenswrapper[4753]: I0129 12:43:34.541493 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-5qfcb_4b6c93ff-5f67-421a-beca-c1b588d8535d/extract-content/0.log" Jan 29 12:43:35 crc kubenswrapper[4753]: I0129 12:43:35.010183 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-5qfcb_4b6c93ff-5f67-421a-beca-c1b588d8535d/registry-server/0.log" Jan 29 12:43:59 crc kubenswrapper[4753]: I0129 12:43:59.253376 4753 patch_prober.go:28] interesting pod/machine-config-daemon-7c24x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 12:43:59 crc kubenswrapper[4753]: I0129 12:43:59.254047 4753 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 12:44:17 crc kubenswrapper[4753]: I0129 12:44:17.965166 4753 kubelet.go:2421] "SyncLoop ADD" 
source="api" pods=["openshift-marketplace/community-operators-5glkh"] Jan 29 12:44:17 crc kubenswrapper[4753]: I0129 12:44:17.967808 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5glkh" Jan 29 12:44:17 crc kubenswrapper[4753]: I0129 12:44:17.992368 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5glkh"] Jan 29 12:44:18 crc kubenswrapper[4753]: I0129 12:44:18.139611 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x2wmk\" (UniqueName: \"kubernetes.io/projected/d77be785-8cbb-42c7-805c-d8d75548e469-kube-api-access-x2wmk\") pod \"community-operators-5glkh\" (UID: \"d77be785-8cbb-42c7-805c-d8d75548e469\") " pod="openshift-marketplace/community-operators-5glkh" Jan 29 12:44:18 crc kubenswrapper[4753]: I0129 12:44:18.140154 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d77be785-8cbb-42c7-805c-d8d75548e469-utilities\") pod \"community-operators-5glkh\" (UID: \"d77be785-8cbb-42c7-805c-d8d75548e469\") " pod="openshift-marketplace/community-operators-5glkh" Jan 29 12:44:18 crc kubenswrapper[4753]: I0129 12:44:18.140585 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d77be785-8cbb-42c7-805c-d8d75548e469-catalog-content\") pod \"community-operators-5glkh\" (UID: \"d77be785-8cbb-42c7-805c-d8d75548e469\") " pod="openshift-marketplace/community-operators-5glkh" Jan 29 12:44:18 crc kubenswrapper[4753]: I0129 12:44:18.242024 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x2wmk\" (UniqueName: \"kubernetes.io/projected/d77be785-8cbb-42c7-805c-d8d75548e469-kube-api-access-x2wmk\") pod \"community-operators-5glkh\" (UID: \"d77be785-8cbb-42c7-805c-d8d75548e469\") " pod="openshift-marketplace/community-operators-5glkh" Jan 29 12:44:18 crc kubenswrapper[4753]: I0129 12:44:18.242474 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d77be785-8cbb-42c7-805c-d8d75548e469-utilities\") pod \"community-operators-5glkh\" (UID: \"d77be785-8cbb-42c7-805c-d8d75548e469\") " pod="openshift-marketplace/community-operators-5glkh" Jan 29 12:44:18 crc kubenswrapper[4753]: I0129 12:44:18.242640 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d77be785-8cbb-42c7-805c-d8d75548e469-catalog-content\") pod \"community-operators-5glkh\" (UID: \"d77be785-8cbb-42c7-805c-d8d75548e469\") " pod="openshift-marketplace/community-operators-5glkh" Jan 29 12:44:18 crc kubenswrapper[4753]: I0129 12:44:18.243065 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d77be785-8cbb-42c7-805c-d8d75548e469-utilities\") pod \"community-operators-5glkh\" (UID: \"d77be785-8cbb-42c7-805c-d8d75548e469\") " pod="openshift-marketplace/community-operators-5glkh" Jan 29 12:44:18 crc kubenswrapper[4753]: I0129 12:44:18.243256 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d77be785-8cbb-42c7-805c-d8d75548e469-catalog-content\") pod \"community-operators-5glkh\" (UID: 
\"d77be785-8cbb-42c7-805c-d8d75548e469\") " pod="openshift-marketplace/community-operators-5glkh" Jan 29 12:44:18 crc kubenswrapper[4753]: I0129 12:44:18.266583 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x2wmk\" (UniqueName: \"kubernetes.io/projected/d77be785-8cbb-42c7-805c-d8d75548e469-kube-api-access-x2wmk\") pod \"community-operators-5glkh\" (UID: \"d77be785-8cbb-42c7-805c-d8d75548e469\") " pod="openshift-marketplace/community-operators-5glkh" Jan 29 12:44:18 crc kubenswrapper[4753]: I0129 12:44:18.298632 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5glkh" Jan 29 12:44:19 crc kubenswrapper[4753]: I0129 12:44:19.234201 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5glkh"] Jan 29 12:44:19 crc kubenswrapper[4753]: I0129 12:44:19.883046 4753 generic.go:334] "Generic (PLEG): container finished" podID="d77be785-8cbb-42c7-805c-d8d75548e469" containerID="3f3027e5d73f9c6890191ce88937e6efc53fa338d5173d7d01f8f68b9a5d789a" exitCode=0 Jan 29 12:44:19 crc kubenswrapper[4753]: I0129 12:44:19.883167 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5glkh" event={"ID":"d77be785-8cbb-42c7-805c-d8d75548e469","Type":"ContainerDied","Data":"3f3027e5d73f9c6890191ce88937e6efc53fa338d5173d7d01f8f68b9a5d789a"} Jan 29 12:44:19 crc kubenswrapper[4753]: I0129 12:44:19.883408 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5glkh" event={"ID":"d77be785-8cbb-42c7-805c-d8d75548e469","Type":"ContainerStarted","Data":"52c1bf3064776cfa739b260848bca55d1ad8bb078c686a576514f6e24b41d366"} Jan 29 12:44:19 crc kubenswrapper[4753]: I0129 12:44:19.885219 4753 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 29 12:44:22 crc kubenswrapper[4753]: I0129 12:44:22.997142 4753 generic.go:334] "Generic (PLEG): container finished" podID="d77be785-8cbb-42c7-805c-d8d75548e469" containerID="6d1ecd75fde738014cc8dc9d644452d8859b5ab88beb5a6e981aac490b25af0e" exitCode=0 Jan 29 12:44:22 crc kubenswrapper[4753]: I0129 12:44:22.997686 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5glkh" event={"ID":"d77be785-8cbb-42c7-805c-d8d75548e469","Type":"ContainerDied","Data":"6d1ecd75fde738014cc8dc9d644452d8859b5ab88beb5a6e981aac490b25af0e"} Jan 29 12:44:26 crc kubenswrapper[4753]: I0129 12:44:26.086704 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5glkh" event={"ID":"d77be785-8cbb-42c7-805c-d8d75548e469","Type":"ContainerStarted","Data":"4292dd9b6c82aef29e90206a62151a75557cdcd51f434d84a20362dc906b0eec"} Jan 29 12:44:26 crc kubenswrapper[4753]: I0129 12:44:26.123750 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-5glkh" podStartSLOduration=4.031663675 podStartE2EDuration="9.12369616s" podCreationTimestamp="2026-01-29 12:44:17 +0000 UTC" firstStartedPulling="2026-01-29 12:44:19.88477316 +0000 UTC m=+2274.136854615" lastFinishedPulling="2026-01-29 12:44:24.976805645 +0000 UTC m=+2279.228887100" observedRunningTime="2026-01-29 12:44:26.11491728 +0000 UTC m=+2280.366998745" watchObservedRunningTime="2026-01-29 12:44:26.12369616 +0000 UTC m=+2280.375777615" Jan 29 12:44:28 crc kubenswrapper[4753]: I0129 12:44:28.300952 4753 kubelet.go:2542] 
"SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-5glkh" Jan 29 12:44:28 crc kubenswrapper[4753]: I0129 12:44:28.302338 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-5glkh" Jan 29 12:44:28 crc kubenswrapper[4753]: I0129 12:44:28.348500 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-5glkh" Jan 29 12:44:29 crc kubenswrapper[4753]: I0129 12:44:29.252764 4753 patch_prober.go:28] interesting pod/machine-config-daemon-7c24x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 12:44:29 crc kubenswrapper[4753]: I0129 12:44:29.252868 4753 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 12:44:33 crc kubenswrapper[4753]: I0129 12:44:33.994783 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-m9km4"] Jan 29 12:44:33 crc kubenswrapper[4753]: I0129 12:44:33.996085 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-m9km4" Jan 29 12:44:34 crc kubenswrapper[4753]: I0129 12:44:34.010728 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-m9km4"] Jan 29 12:44:34 crc kubenswrapper[4753]: I0129 12:44:34.070155 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/01f0b2b4-25bf-4fc9-9630-7a1a212730c8-utilities\") pod \"certified-operators-m9km4\" (UID: \"01f0b2b4-25bf-4fc9-9630-7a1a212730c8\") " pod="openshift-marketplace/certified-operators-m9km4" Jan 29 12:44:34 crc kubenswrapper[4753]: I0129 12:44:34.070215 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dp56t\" (UniqueName: \"kubernetes.io/projected/01f0b2b4-25bf-4fc9-9630-7a1a212730c8-kube-api-access-dp56t\") pod \"certified-operators-m9km4\" (UID: \"01f0b2b4-25bf-4fc9-9630-7a1a212730c8\") " pod="openshift-marketplace/certified-operators-m9km4" Jan 29 12:44:34 crc kubenswrapper[4753]: I0129 12:44:34.070522 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/01f0b2b4-25bf-4fc9-9630-7a1a212730c8-catalog-content\") pod \"certified-operators-m9km4\" (UID: \"01f0b2b4-25bf-4fc9-9630-7a1a212730c8\") " pod="openshift-marketplace/certified-operators-m9km4" Jan 29 12:44:34 crc kubenswrapper[4753]: I0129 12:44:34.171807 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/01f0b2b4-25bf-4fc9-9630-7a1a212730c8-catalog-content\") pod \"certified-operators-m9km4\" (UID: \"01f0b2b4-25bf-4fc9-9630-7a1a212730c8\") " pod="openshift-marketplace/certified-operators-m9km4" Jan 29 12:44:34 crc kubenswrapper[4753]: I0129 12:44:34.171894 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/01f0b2b4-25bf-4fc9-9630-7a1a212730c8-utilities\") pod \"certified-operators-m9km4\" (UID: \"01f0b2b4-25bf-4fc9-9630-7a1a212730c8\") " pod="openshift-marketplace/certified-operators-m9km4" Jan 29 12:44:34 crc kubenswrapper[4753]: I0129 12:44:34.171913 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dp56t\" (UniqueName: \"kubernetes.io/projected/01f0b2b4-25bf-4fc9-9630-7a1a212730c8-kube-api-access-dp56t\") pod \"certified-operators-m9km4\" (UID: \"01f0b2b4-25bf-4fc9-9630-7a1a212730c8\") " pod="openshift-marketplace/certified-operators-m9km4" Jan 29 12:44:34 crc kubenswrapper[4753]: I0129 12:44:34.172532 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/01f0b2b4-25bf-4fc9-9630-7a1a212730c8-catalog-content\") pod \"certified-operators-m9km4\" (UID: \"01f0b2b4-25bf-4fc9-9630-7a1a212730c8\") " pod="openshift-marketplace/certified-operators-m9km4" Jan 29 12:44:34 crc kubenswrapper[4753]: I0129 12:44:34.172588 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/01f0b2b4-25bf-4fc9-9630-7a1a212730c8-utilities\") pod \"certified-operators-m9km4\" (UID: \"01f0b2b4-25bf-4fc9-9630-7a1a212730c8\") " pod="openshift-marketplace/certified-operators-m9km4" Jan 29 12:44:34 crc kubenswrapper[4753]: I0129 12:44:34.193161 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dp56t\" (UniqueName: \"kubernetes.io/projected/01f0b2b4-25bf-4fc9-9630-7a1a212730c8-kube-api-access-dp56t\") pod \"certified-operators-m9km4\" (UID: \"01f0b2b4-25bf-4fc9-9630-7a1a212730c8\") " pod="openshift-marketplace/certified-operators-m9km4" Jan 29 12:44:34 crc kubenswrapper[4753]: I0129 12:44:34.330542 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-m9km4" Jan 29 12:44:34 crc kubenswrapper[4753]: I0129 12:44:34.658256 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-m9km4"] Jan 29 12:44:34 crc kubenswrapper[4753]: W0129 12:44:34.664755 4753 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod01f0b2b4_25bf_4fc9_9630_7a1a212730c8.slice/crio-eb9b6ed7642b8d71d10b75a1f684df4e29747365c5978a368d2a7f0c6dfbe2f3 WatchSource:0}: Error finding container eb9b6ed7642b8d71d10b75a1f684df4e29747365c5978a368d2a7f0c6dfbe2f3: Status 404 returned error can't find the container with id eb9b6ed7642b8d71d10b75a1f684df4e29747365c5978a368d2a7f0c6dfbe2f3 Jan 29 12:44:35 crc kubenswrapper[4753]: I0129 12:44:35.152398 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-m9km4" event={"ID":"01f0b2b4-25bf-4fc9-9630-7a1a212730c8","Type":"ContainerStarted","Data":"eb9b6ed7642b8d71d10b75a1f684df4e29747365c5978a368d2a7f0c6dfbe2f3"} Jan 29 12:44:36 crc kubenswrapper[4753]: I0129 12:44:36.160524 4753 generic.go:334] "Generic (PLEG): container finished" podID="01f0b2b4-25bf-4fc9-9630-7a1a212730c8" containerID="b9ac7b5b631b64a81a6b5140a0d4bf7478619e8c7bc69959305d2c69af9219b4" exitCode=0 Jan 29 12:44:36 crc kubenswrapper[4753]: I0129 12:44:36.160809 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-m9km4" event={"ID":"01f0b2b4-25bf-4fc9-9630-7a1a212730c8","Type":"ContainerDied","Data":"b9ac7b5b631b64a81a6b5140a0d4bf7478619e8c7bc69959305d2c69af9219b4"} Jan 29 12:44:38 crc kubenswrapper[4753]: I0129 12:44:38.177837 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-m9km4" event={"ID":"01f0b2b4-25bf-4fc9-9630-7a1a212730c8","Type":"ContainerStarted","Data":"173bf4fbead0334e904df1a290c87a2d5d4494c8b29d28f3bf7df8fe6a0574d9"} Jan 29 12:44:38 crc kubenswrapper[4753]: I0129 12:44:38.342022 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-5glkh" Jan 29 12:44:39 crc kubenswrapper[4753]: I0129 12:44:39.187329 4753 generic.go:334] "Generic (PLEG): container finished" podID="01f0b2b4-25bf-4fc9-9630-7a1a212730c8" containerID="173bf4fbead0334e904df1a290c87a2d5d4494c8b29d28f3bf7df8fe6a0574d9" exitCode=0 Jan 29 12:44:39 crc kubenswrapper[4753]: I0129 12:44:39.187374 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-m9km4" event={"ID":"01f0b2b4-25bf-4fc9-9630-7a1a212730c8","Type":"ContainerDied","Data":"173bf4fbead0334e904df1a290c87a2d5d4494c8b29d28f3bf7df8fe6a0574d9"} Jan 29 12:44:39 crc kubenswrapper[4753]: I0129 12:44:39.369792 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-5glkh"] Jan 29 12:44:39 crc kubenswrapper[4753]: I0129 12:44:39.370129 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-5glkh" podUID="d77be785-8cbb-42c7-805c-d8d75548e469" containerName="registry-server" containerID="cri-o://4292dd9b6c82aef29e90206a62151a75557cdcd51f434d84a20362dc906b0eec" gracePeriod=2 Jan 29 12:44:39 crc kubenswrapper[4753]: I0129 12:44:39.754278 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-5glkh" Jan 29 12:44:39 crc kubenswrapper[4753]: I0129 12:44:39.922645 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d77be785-8cbb-42c7-805c-d8d75548e469-catalog-content\") pod \"d77be785-8cbb-42c7-805c-d8d75548e469\" (UID: \"d77be785-8cbb-42c7-805c-d8d75548e469\") " Jan 29 12:44:39 crc kubenswrapper[4753]: I0129 12:44:39.922764 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2wmk\" (UniqueName: \"kubernetes.io/projected/d77be785-8cbb-42c7-805c-d8d75548e469-kube-api-access-x2wmk\") pod \"d77be785-8cbb-42c7-805c-d8d75548e469\" (UID: \"d77be785-8cbb-42c7-805c-d8d75548e469\") " Jan 29 12:44:39 crc kubenswrapper[4753]: I0129 12:44:39.922819 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d77be785-8cbb-42c7-805c-d8d75548e469-utilities\") pod \"d77be785-8cbb-42c7-805c-d8d75548e469\" (UID: \"d77be785-8cbb-42c7-805c-d8d75548e469\") " Jan 29 12:44:39 crc kubenswrapper[4753]: I0129 12:44:39.924269 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d77be785-8cbb-42c7-805c-d8d75548e469-utilities" (OuterVolumeSpecName: "utilities") pod "d77be785-8cbb-42c7-805c-d8d75548e469" (UID: "d77be785-8cbb-42c7-805c-d8d75548e469"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:44:39 crc kubenswrapper[4753]: I0129 12:44:39.930303 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d77be785-8cbb-42c7-805c-d8d75548e469-kube-api-access-x2wmk" (OuterVolumeSpecName: "kube-api-access-x2wmk") pod "d77be785-8cbb-42c7-805c-d8d75548e469" (UID: "d77be785-8cbb-42c7-805c-d8d75548e469"). InnerVolumeSpecName "kube-api-access-x2wmk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:44:39 crc kubenswrapper[4753]: I0129 12:44:39.977652 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d77be785-8cbb-42c7-805c-d8d75548e469-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d77be785-8cbb-42c7-805c-d8d75548e469" (UID: "d77be785-8cbb-42c7-805c-d8d75548e469"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:44:40 crc kubenswrapper[4753]: I0129 12:44:40.025025 4753 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d77be785-8cbb-42c7-805c-d8d75548e469-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 12:44:40 crc kubenswrapper[4753]: I0129 12:44:40.025072 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2wmk\" (UniqueName: \"kubernetes.io/projected/d77be785-8cbb-42c7-805c-d8d75548e469-kube-api-access-x2wmk\") on node \"crc\" DevicePath \"\"" Jan 29 12:44:40 crc kubenswrapper[4753]: I0129 12:44:40.025104 4753 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d77be785-8cbb-42c7-805c-d8d75548e469-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 12:44:40 crc kubenswrapper[4753]: I0129 12:44:40.195493 4753 generic.go:334] "Generic (PLEG): container finished" podID="d77be785-8cbb-42c7-805c-d8d75548e469" containerID="4292dd9b6c82aef29e90206a62151a75557cdcd51f434d84a20362dc906b0eec" exitCode=0 Jan 29 12:44:40 crc kubenswrapper[4753]: I0129 12:44:40.195542 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5glkh" event={"ID":"d77be785-8cbb-42c7-805c-d8d75548e469","Type":"ContainerDied","Data":"4292dd9b6c82aef29e90206a62151a75557cdcd51f434d84a20362dc906b0eec"} Jan 29 12:44:40 crc kubenswrapper[4753]: I0129 12:44:40.195592 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5glkh" Jan 29 12:44:40 crc kubenswrapper[4753]: I0129 12:44:40.195611 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5glkh" event={"ID":"d77be785-8cbb-42c7-805c-d8d75548e469","Type":"ContainerDied","Data":"52c1bf3064776cfa739b260848bca55d1ad8bb078c686a576514f6e24b41d366"} Jan 29 12:44:40 crc kubenswrapper[4753]: I0129 12:44:40.195652 4753 scope.go:117] "RemoveContainer" containerID="4292dd9b6c82aef29e90206a62151a75557cdcd51f434d84a20362dc906b0eec" Jan 29 12:44:40 crc kubenswrapper[4753]: I0129 12:44:40.201205 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-m9km4" event={"ID":"01f0b2b4-25bf-4fc9-9630-7a1a212730c8","Type":"ContainerStarted","Data":"7da782ebadc2f0ba9f5abb5fc08ffeedbaab359828d4f1dd468b042596769235"} Jan 29 12:44:40 crc kubenswrapper[4753]: I0129 12:44:40.218028 4753 scope.go:117] "RemoveContainer" containerID="6d1ecd75fde738014cc8dc9d644452d8859b5ab88beb5a6e981aac490b25af0e" Jan 29 12:44:40 crc kubenswrapper[4753]: I0129 12:44:40.237088 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-m9km4" podStartSLOduration=3.7689514600000003 podStartE2EDuration="7.237062163s" podCreationTimestamp="2026-01-29 12:44:33 +0000 UTC" firstStartedPulling="2026-01-29 12:44:36.162593063 +0000 UTC m=+2290.414674518" lastFinishedPulling="2026-01-29 12:44:39.630703766 +0000 UTC m=+2293.882785221" observedRunningTime="2026-01-29 12:44:40.233637826 +0000 UTC m=+2294.485719291" watchObservedRunningTime="2026-01-29 12:44:40.237062163 +0000 UTC m=+2294.489143638" Jan 29 12:44:40 crc kubenswrapper[4753]: I0129 12:44:40.254765 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-5glkh"] Jan 29 12:44:40 crc kubenswrapper[4753]: I0129 12:44:40.257495 4753 scope.go:117] 
"RemoveContainer" containerID="3f3027e5d73f9c6890191ce88937e6efc53fa338d5173d7d01f8f68b9a5d789a" Jan 29 12:44:40 crc kubenswrapper[4753]: I0129 12:44:40.258062 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-5glkh"] Jan 29 12:44:40 crc kubenswrapper[4753]: I0129 12:44:40.271020 4753 scope.go:117] "RemoveContainer" containerID="4292dd9b6c82aef29e90206a62151a75557cdcd51f434d84a20362dc906b0eec" Jan 29 12:44:40 crc kubenswrapper[4753]: E0129 12:44:40.271540 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4292dd9b6c82aef29e90206a62151a75557cdcd51f434d84a20362dc906b0eec\": container with ID starting with 4292dd9b6c82aef29e90206a62151a75557cdcd51f434d84a20362dc906b0eec not found: ID does not exist" containerID="4292dd9b6c82aef29e90206a62151a75557cdcd51f434d84a20362dc906b0eec" Jan 29 12:44:40 crc kubenswrapper[4753]: I0129 12:44:40.271599 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4292dd9b6c82aef29e90206a62151a75557cdcd51f434d84a20362dc906b0eec"} err="failed to get container status \"4292dd9b6c82aef29e90206a62151a75557cdcd51f434d84a20362dc906b0eec\": rpc error: code = NotFound desc = could not find container \"4292dd9b6c82aef29e90206a62151a75557cdcd51f434d84a20362dc906b0eec\": container with ID starting with 4292dd9b6c82aef29e90206a62151a75557cdcd51f434d84a20362dc906b0eec not found: ID does not exist" Jan 29 12:44:40 crc kubenswrapper[4753]: I0129 12:44:40.271638 4753 scope.go:117] "RemoveContainer" containerID="6d1ecd75fde738014cc8dc9d644452d8859b5ab88beb5a6e981aac490b25af0e" Jan 29 12:44:40 crc kubenswrapper[4753]: E0129 12:44:40.271860 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6d1ecd75fde738014cc8dc9d644452d8859b5ab88beb5a6e981aac490b25af0e\": container with ID starting with 6d1ecd75fde738014cc8dc9d644452d8859b5ab88beb5a6e981aac490b25af0e not found: ID does not exist" containerID="6d1ecd75fde738014cc8dc9d644452d8859b5ab88beb5a6e981aac490b25af0e" Jan 29 12:44:40 crc kubenswrapper[4753]: I0129 12:44:40.271892 4753 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6d1ecd75fde738014cc8dc9d644452d8859b5ab88beb5a6e981aac490b25af0e"} err="failed to get container status \"6d1ecd75fde738014cc8dc9d644452d8859b5ab88beb5a6e981aac490b25af0e\": rpc error: code = NotFound desc = could not find container \"6d1ecd75fde738014cc8dc9d644452d8859b5ab88beb5a6e981aac490b25af0e\": container with ID starting with 6d1ecd75fde738014cc8dc9d644452d8859b5ab88beb5a6e981aac490b25af0e not found: ID does not exist" Jan 29 12:44:40 crc kubenswrapper[4753]: I0129 12:44:40.271911 4753 scope.go:117] "RemoveContainer" containerID="3f3027e5d73f9c6890191ce88937e6efc53fa338d5173d7d01f8f68b9a5d789a" Jan 29 12:44:40 crc kubenswrapper[4753]: E0129 12:44:40.272203 4753 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3f3027e5d73f9c6890191ce88937e6efc53fa338d5173d7d01f8f68b9a5d789a\": container with ID starting with 3f3027e5d73f9c6890191ce88937e6efc53fa338d5173d7d01f8f68b9a5d789a not found: ID does not exist" containerID="3f3027e5d73f9c6890191ce88937e6efc53fa338d5173d7d01f8f68b9a5d789a" Jan 29 12:44:40 crc kubenswrapper[4753]: I0129 12:44:40.272286 4753 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"3f3027e5d73f9c6890191ce88937e6efc53fa338d5173d7d01f8f68b9a5d789a"} err="failed to get container status \"3f3027e5d73f9c6890191ce88937e6efc53fa338d5173d7d01f8f68b9a5d789a\": rpc error: code = NotFound desc = could not find container \"3f3027e5d73f9c6890191ce88937e6efc53fa338d5173d7d01f8f68b9a5d789a\": container with ID starting with 3f3027e5d73f9c6890191ce88937e6efc53fa338d5173d7d01f8f68b9a5d789a not found: ID does not exist" Jan 29 12:44:41 crc kubenswrapper[4753]: I0129 12:44:41.896209 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d77be785-8cbb-42c7-805c-d8d75548e469" path="/var/lib/kubelet/pods/d77be785-8cbb-42c7-805c-d8d75548e469/volumes" Jan 29 12:44:44 crc kubenswrapper[4753]: I0129 12:44:44.331677 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-m9km4" Jan 29 12:44:44 crc kubenswrapper[4753]: I0129 12:44:44.333433 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-m9km4" Jan 29 12:44:44 crc kubenswrapper[4753]: I0129 12:44:44.382932 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-m9km4" Jan 29 12:44:45 crc kubenswrapper[4753]: I0129 12:44:45.276916 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-m9km4" Jan 29 12:44:45 crc kubenswrapper[4753]: I0129 12:44:45.571045 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-m9km4"] Jan 29 12:44:47 crc kubenswrapper[4753]: I0129 12:44:47.249346 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-m9km4" podUID="01f0b2b4-25bf-4fc9-9630-7a1a212730c8" containerName="registry-server" containerID="cri-o://7da782ebadc2f0ba9f5abb5fc08ffeedbaab359828d4f1dd468b042596769235" gracePeriod=2 Jan 29 12:44:48 crc kubenswrapper[4753]: I0129 12:44:48.258731 4753 generic.go:334] "Generic (PLEG): container finished" podID="01f0b2b4-25bf-4fc9-9630-7a1a212730c8" containerID="7da782ebadc2f0ba9f5abb5fc08ffeedbaab359828d4f1dd468b042596769235" exitCode=0 Jan 29 12:44:48 crc kubenswrapper[4753]: I0129 12:44:48.258799 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-m9km4" event={"ID":"01f0b2b4-25bf-4fc9-9630-7a1a212730c8","Type":"ContainerDied","Data":"7da782ebadc2f0ba9f5abb5fc08ffeedbaab359828d4f1dd468b042596769235"} Jan 29 12:44:48 crc kubenswrapper[4753]: I0129 12:44:48.542803 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-m9km4" Jan 29 12:44:48 crc kubenswrapper[4753]: I0129 12:44:48.739865 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/01f0b2b4-25bf-4fc9-9630-7a1a212730c8-catalog-content\") pod \"01f0b2b4-25bf-4fc9-9630-7a1a212730c8\" (UID: \"01f0b2b4-25bf-4fc9-9630-7a1a212730c8\") " Jan 29 12:44:48 crc kubenswrapper[4753]: I0129 12:44:48.739993 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/01f0b2b4-25bf-4fc9-9630-7a1a212730c8-utilities\") pod \"01f0b2b4-25bf-4fc9-9630-7a1a212730c8\" (UID: \"01f0b2b4-25bf-4fc9-9630-7a1a212730c8\") " Jan 29 12:44:48 crc kubenswrapper[4753]: I0129 12:44:48.740054 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dp56t\" (UniqueName: \"kubernetes.io/projected/01f0b2b4-25bf-4fc9-9630-7a1a212730c8-kube-api-access-dp56t\") pod \"01f0b2b4-25bf-4fc9-9630-7a1a212730c8\" (UID: \"01f0b2b4-25bf-4fc9-9630-7a1a212730c8\") " Jan 29 12:44:48 crc kubenswrapper[4753]: I0129 12:44:48.741042 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/01f0b2b4-25bf-4fc9-9630-7a1a212730c8-utilities" (OuterVolumeSpecName: "utilities") pod "01f0b2b4-25bf-4fc9-9630-7a1a212730c8" (UID: "01f0b2b4-25bf-4fc9-9630-7a1a212730c8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:44:48 crc kubenswrapper[4753]: I0129 12:44:48.752447 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01f0b2b4-25bf-4fc9-9630-7a1a212730c8-kube-api-access-dp56t" (OuterVolumeSpecName: "kube-api-access-dp56t") pod "01f0b2b4-25bf-4fc9-9630-7a1a212730c8" (UID: "01f0b2b4-25bf-4fc9-9630-7a1a212730c8"). InnerVolumeSpecName "kube-api-access-dp56t". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:44:48 crc kubenswrapper[4753]: I0129 12:44:48.842172 4753 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/01f0b2b4-25bf-4fc9-9630-7a1a212730c8-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 12:44:48 crc kubenswrapper[4753]: I0129 12:44:48.842531 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dp56t\" (UniqueName: \"kubernetes.io/projected/01f0b2b4-25bf-4fc9-9630-7a1a212730c8-kube-api-access-dp56t\") on node \"crc\" DevicePath \"\"" Jan 29 12:44:49 crc kubenswrapper[4753]: I0129 12:44:49.266791 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-m9km4" event={"ID":"01f0b2b4-25bf-4fc9-9630-7a1a212730c8","Type":"ContainerDied","Data":"eb9b6ed7642b8d71d10b75a1f684df4e29747365c5978a368d2a7f0c6dfbe2f3"} Jan 29 12:44:49 crc kubenswrapper[4753]: I0129 12:44:49.266850 4753 scope.go:117] "RemoveContainer" containerID="7da782ebadc2f0ba9f5abb5fc08ffeedbaab359828d4f1dd468b042596769235" Jan 29 12:44:49 crc kubenswrapper[4753]: I0129 12:44:49.266874 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-m9km4" Jan 29 12:44:49 crc kubenswrapper[4753]: I0129 12:44:49.302790 4753 scope.go:117] "RemoveContainer" containerID="173bf4fbead0334e904df1a290c87a2d5d4494c8b29d28f3bf7df8fe6a0574d9" Jan 29 12:44:49 crc kubenswrapper[4753]: I0129 12:44:49.320883 4753 scope.go:117] "RemoveContainer" containerID="b9ac7b5b631b64a81a6b5140a0d4bf7478619e8c7bc69959305d2c69af9219b4" Jan 29 12:44:49 crc kubenswrapper[4753]: I0129 12:44:49.989110 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/01f0b2b4-25bf-4fc9-9630-7a1a212730c8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "01f0b2b4-25bf-4fc9-9630-7a1a212730c8" (UID: "01f0b2b4-25bf-4fc9-9630-7a1a212730c8"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:44:50 crc kubenswrapper[4753]: I0129 12:44:50.085747 4753 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/01f0b2b4-25bf-4fc9-9630-7a1a212730c8-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 12:44:50 crc kubenswrapper[4753]: I0129 12:44:50.240217 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-m9km4"] Jan 29 12:44:50 crc kubenswrapper[4753]: I0129 12:44:50.248719 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-m9km4"] Jan 29 12:44:51 crc kubenswrapper[4753]: I0129 12:44:51.897553 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01f0b2b4-25bf-4fc9-9630-7a1a212730c8" path="/var/lib/kubelet/pods/01f0b2b4-25bf-4fc9-9630-7a1a212730c8/volumes" Jan 29 12:44:58 crc kubenswrapper[4753]: I0129 12:44:58.378984 4753 generic.go:334] "Generic (PLEG): container finished" podID="e66361cf-7d08-49f4-9571-3450b52d2f56" containerID="1c1735a2355ec096e143310a1896f58b5c8c4311180ba57e66dbfd510f5b722c" exitCode=0 Jan 29 12:44:58 crc kubenswrapper[4753]: I0129 12:44:58.379048 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6dx95/must-gather-8h8qb" event={"ID":"e66361cf-7d08-49f4-9571-3450b52d2f56","Type":"ContainerDied","Data":"1c1735a2355ec096e143310a1896f58b5c8c4311180ba57e66dbfd510f5b722c"} Jan 29 12:44:58 crc kubenswrapper[4753]: I0129 12:44:58.380192 4753 scope.go:117] "RemoveContainer" containerID="1c1735a2355ec096e143310a1896f58b5c8c4311180ba57e66dbfd510f5b722c" Jan 29 12:44:58 crc kubenswrapper[4753]: I0129 12:44:58.988896 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-6dx95_must-gather-8h8qb_e66361cf-7d08-49f4-9571-3450b52d2f56/gather/0.log" Jan 29 12:44:59 crc kubenswrapper[4753]: I0129 12:44:59.262847 4753 patch_prober.go:28] interesting pod/machine-config-daemon-7c24x container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 12:44:59 crc kubenswrapper[4753]: I0129 12:44:59.262906 4753 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 12:44:59 crc kubenswrapper[4753]: I0129 12:44:59.262954 4753 
kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" Jan 29 12:44:59 crc kubenswrapper[4753]: I0129 12:44:59.263694 4753 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c9612c284467c21e6e66f796daa879bac36be707c5a1e8ee913fb0adc13ec2b7"} pod="openshift-machine-config-operator/machine-config-daemon-7c24x" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 12:44:59 crc kubenswrapper[4753]: I0129 12:44:59.263751 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" containerName="machine-config-daemon" containerID="cri-o://c9612c284467c21e6e66f796daa879bac36be707c5a1e8ee913fb0adc13ec2b7" gracePeriod=600 Jan 29 12:44:59 crc kubenswrapper[4753]: E0129 12:44:59.484136 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7c24x_openshift-machine-config-operator(b0310995-a7c7-47c3-ae6c-05daaaba92a6)\"" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" Jan 29 12:45:00 crc kubenswrapper[4753]: I0129 12:45:00.149327 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494845-lbqdr"] Jan 29 12:45:00 crc kubenswrapper[4753]: E0129 12:45:00.149981 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d77be785-8cbb-42c7-805c-d8d75548e469" containerName="extract-content" Jan 29 12:45:00 crc kubenswrapper[4753]: I0129 12:45:00.150010 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="d77be785-8cbb-42c7-805c-d8d75548e469" containerName="extract-content" Jan 29 12:45:00 crc kubenswrapper[4753]: E0129 12:45:00.150030 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="01f0b2b4-25bf-4fc9-9630-7a1a212730c8" containerName="extract-utilities" Jan 29 12:45:00 crc kubenswrapper[4753]: I0129 12:45:00.150037 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="01f0b2b4-25bf-4fc9-9630-7a1a212730c8" containerName="extract-utilities" Jan 29 12:45:00 crc kubenswrapper[4753]: E0129 12:45:00.150062 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="01f0b2b4-25bf-4fc9-9630-7a1a212730c8" containerName="registry-server" Jan 29 12:45:00 crc kubenswrapper[4753]: I0129 12:45:00.150068 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="01f0b2b4-25bf-4fc9-9630-7a1a212730c8" containerName="registry-server" Jan 29 12:45:00 crc kubenswrapper[4753]: E0129 12:45:00.150079 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d77be785-8cbb-42c7-805c-d8d75548e469" containerName="registry-server" Jan 29 12:45:00 crc kubenswrapper[4753]: I0129 12:45:00.150086 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="d77be785-8cbb-42c7-805c-d8d75548e469" containerName="registry-server" Jan 29 12:45:00 crc kubenswrapper[4753]: E0129 12:45:00.150094 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="01f0b2b4-25bf-4fc9-9630-7a1a212730c8" containerName="extract-content" Jan 29 12:45:00 crc kubenswrapper[4753]: I0129 12:45:00.150100 4753 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="01f0b2b4-25bf-4fc9-9630-7a1a212730c8" containerName="extract-content" Jan 29 12:45:00 crc kubenswrapper[4753]: E0129 12:45:00.150112 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d77be785-8cbb-42c7-805c-d8d75548e469" containerName="extract-utilities" Jan 29 12:45:00 crc kubenswrapper[4753]: I0129 12:45:00.150118 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="d77be785-8cbb-42c7-805c-d8d75548e469" containerName="extract-utilities" Jan 29 12:45:00 crc kubenswrapper[4753]: I0129 12:45:00.150246 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="01f0b2b4-25bf-4fc9-9630-7a1a212730c8" containerName="registry-server" Jan 29 12:45:00 crc kubenswrapper[4753]: I0129 12:45:00.150271 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="d77be785-8cbb-42c7-805c-d8d75548e469" containerName="registry-server" Jan 29 12:45:00 crc kubenswrapper[4753]: I0129 12:45:00.150741 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494845-lbqdr" Jan 29 12:45:00 crc kubenswrapper[4753]: I0129 12:45:00.154002 4753 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 29 12:45:00 crc kubenswrapper[4753]: I0129 12:45:00.154608 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494845-lbqdr"] Jan 29 12:45:00 crc kubenswrapper[4753]: I0129 12:45:00.155364 4753 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 29 12:45:00 crc kubenswrapper[4753]: I0129 12:45:00.229897 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/11b3cff0-3079-4e2f-8944-05f4441d5c00-secret-volume\") pod \"collect-profiles-29494845-lbqdr\" (UID: \"11b3cff0-3079-4e2f-8944-05f4441d5c00\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494845-lbqdr" Jan 29 12:45:00 crc kubenswrapper[4753]: I0129 12:45:00.229972 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mgpfr\" (UniqueName: \"kubernetes.io/projected/11b3cff0-3079-4e2f-8944-05f4441d5c00-kube-api-access-mgpfr\") pod \"collect-profiles-29494845-lbqdr\" (UID: \"11b3cff0-3079-4e2f-8944-05f4441d5c00\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494845-lbqdr" Jan 29 12:45:00 crc kubenswrapper[4753]: I0129 12:45:00.230005 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/11b3cff0-3079-4e2f-8944-05f4441d5c00-config-volume\") pod \"collect-profiles-29494845-lbqdr\" (UID: \"11b3cff0-3079-4e2f-8944-05f4441d5c00\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494845-lbqdr" Jan 29 12:45:00 crc kubenswrapper[4753]: I0129 12:45:00.331307 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/11b3cff0-3079-4e2f-8944-05f4441d5c00-secret-volume\") pod \"collect-profiles-29494845-lbqdr\" (UID: \"11b3cff0-3079-4e2f-8944-05f4441d5c00\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494845-lbqdr" Jan 29 12:45:00 crc kubenswrapper[4753]: I0129 12:45:00.331394 4753 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-mgpfr\" (UniqueName: \"kubernetes.io/projected/11b3cff0-3079-4e2f-8944-05f4441d5c00-kube-api-access-mgpfr\") pod \"collect-profiles-29494845-lbqdr\" (UID: \"11b3cff0-3079-4e2f-8944-05f4441d5c00\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494845-lbqdr" Jan 29 12:45:00 crc kubenswrapper[4753]: I0129 12:45:00.331436 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/11b3cff0-3079-4e2f-8944-05f4441d5c00-config-volume\") pod \"collect-profiles-29494845-lbqdr\" (UID: \"11b3cff0-3079-4e2f-8944-05f4441d5c00\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494845-lbqdr" Jan 29 12:45:00 crc kubenswrapper[4753]: I0129 12:45:00.332556 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/11b3cff0-3079-4e2f-8944-05f4441d5c00-config-volume\") pod \"collect-profiles-29494845-lbqdr\" (UID: \"11b3cff0-3079-4e2f-8944-05f4441d5c00\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494845-lbqdr" Jan 29 12:45:00 crc kubenswrapper[4753]: I0129 12:45:00.339574 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/11b3cff0-3079-4e2f-8944-05f4441d5c00-secret-volume\") pod \"collect-profiles-29494845-lbqdr\" (UID: \"11b3cff0-3079-4e2f-8944-05f4441d5c00\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494845-lbqdr" Jan 29 12:45:00 crc kubenswrapper[4753]: I0129 12:45:00.358800 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mgpfr\" (UniqueName: \"kubernetes.io/projected/11b3cff0-3079-4e2f-8944-05f4441d5c00-kube-api-access-mgpfr\") pod \"collect-profiles-29494845-lbqdr\" (UID: \"11b3cff0-3079-4e2f-8944-05f4441d5c00\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494845-lbqdr" Jan 29 12:45:00 crc kubenswrapper[4753]: I0129 12:45:00.402255 4753 generic.go:334] "Generic (PLEG): container finished" podID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" containerID="c9612c284467c21e6e66f796daa879bac36be707c5a1e8ee913fb0adc13ec2b7" exitCode=0 Jan 29 12:45:00 crc kubenswrapper[4753]: I0129 12:45:00.402338 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" event={"ID":"b0310995-a7c7-47c3-ae6c-05daaaba92a6","Type":"ContainerDied","Data":"c9612c284467c21e6e66f796daa879bac36be707c5a1e8ee913fb0adc13ec2b7"} Jan 29 12:45:00 crc kubenswrapper[4753]: I0129 12:45:00.402449 4753 scope.go:117] "RemoveContainer" containerID="68d8fb44353981db7ed428c16c1be1006209ecd529ce7dc45390bb208bf8ee08" Jan 29 12:45:00 crc kubenswrapper[4753]: I0129 12:45:00.403293 4753 scope.go:117] "RemoveContainer" containerID="c9612c284467c21e6e66f796daa879bac36be707c5a1e8ee913fb0adc13ec2b7" Jan 29 12:45:00 crc kubenswrapper[4753]: E0129 12:45:00.403573 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7c24x_openshift-machine-config-operator(b0310995-a7c7-47c3-ae6c-05daaaba92a6)\"" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" Jan 29 12:45:00 crc kubenswrapper[4753]: I0129 12:45:00.476480 4753 util.go:30] "No sandbox for pod 
can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494845-lbqdr" Jan 29 12:45:00 crc kubenswrapper[4753]: I0129 12:45:00.701499 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494845-lbqdr"] Jan 29 12:45:01 crc kubenswrapper[4753]: I0129 12:45:01.410587 4753 generic.go:334] "Generic (PLEG): container finished" podID="11b3cff0-3079-4e2f-8944-05f4441d5c00" containerID="e7eaff825f17ccbedac7fd8c7217590768df5c7a0d92f1a546ae7378f6167e46" exitCode=0 Jan 29 12:45:01 crc kubenswrapper[4753]: I0129 12:45:01.410774 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494845-lbqdr" event={"ID":"11b3cff0-3079-4e2f-8944-05f4441d5c00","Type":"ContainerDied","Data":"e7eaff825f17ccbedac7fd8c7217590768df5c7a0d92f1a546ae7378f6167e46"} Jan 29 12:45:01 crc kubenswrapper[4753]: I0129 12:45:01.410956 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494845-lbqdr" event={"ID":"11b3cff0-3079-4e2f-8944-05f4441d5c00","Type":"ContainerStarted","Data":"84ef54d62fb4f7247d0d26a34de22cf8b49557f055d042071f5abad395b670e8"} Jan 29 12:45:02 crc kubenswrapper[4753]: I0129 12:45:02.690442 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494845-lbqdr" Jan 29 12:45:02 crc kubenswrapper[4753]: I0129 12:45:02.765475 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mgpfr\" (UniqueName: \"kubernetes.io/projected/11b3cff0-3079-4e2f-8944-05f4441d5c00-kube-api-access-mgpfr\") pod \"11b3cff0-3079-4e2f-8944-05f4441d5c00\" (UID: \"11b3cff0-3079-4e2f-8944-05f4441d5c00\") " Jan 29 12:45:02 crc kubenswrapper[4753]: I0129 12:45:02.765596 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/11b3cff0-3079-4e2f-8944-05f4441d5c00-secret-volume\") pod \"11b3cff0-3079-4e2f-8944-05f4441d5c00\" (UID: \"11b3cff0-3079-4e2f-8944-05f4441d5c00\") " Jan 29 12:45:02 crc kubenswrapper[4753]: I0129 12:45:02.765667 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/11b3cff0-3079-4e2f-8944-05f4441d5c00-config-volume\") pod \"11b3cff0-3079-4e2f-8944-05f4441d5c00\" (UID: \"11b3cff0-3079-4e2f-8944-05f4441d5c00\") " Jan 29 12:45:02 crc kubenswrapper[4753]: I0129 12:45:02.772274 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/11b3cff0-3079-4e2f-8944-05f4441d5c00-config-volume" (OuterVolumeSpecName: "config-volume") pod "11b3cff0-3079-4e2f-8944-05f4441d5c00" (UID: "11b3cff0-3079-4e2f-8944-05f4441d5c00"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 12:45:02 crc kubenswrapper[4753]: I0129 12:45:02.772305 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/11b3cff0-3079-4e2f-8944-05f4441d5c00-kube-api-access-mgpfr" (OuterVolumeSpecName: "kube-api-access-mgpfr") pod "11b3cff0-3079-4e2f-8944-05f4441d5c00" (UID: "11b3cff0-3079-4e2f-8944-05f4441d5c00"). InnerVolumeSpecName "kube-api-access-mgpfr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:45:02 crc kubenswrapper[4753]: I0129 12:45:02.774454 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/11b3cff0-3079-4e2f-8944-05f4441d5c00-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "11b3cff0-3079-4e2f-8944-05f4441d5c00" (UID: "11b3cff0-3079-4e2f-8944-05f4441d5c00"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 12:45:02 crc kubenswrapper[4753]: I0129 12:45:02.867885 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mgpfr\" (UniqueName: \"kubernetes.io/projected/11b3cff0-3079-4e2f-8944-05f4441d5c00-kube-api-access-mgpfr\") on node \"crc\" DevicePath \"\"" Jan 29 12:45:02 crc kubenswrapper[4753]: I0129 12:45:02.867930 4753 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/11b3cff0-3079-4e2f-8944-05f4441d5c00-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 29 12:45:02 crc kubenswrapper[4753]: I0129 12:45:02.867946 4753 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/11b3cff0-3079-4e2f-8944-05f4441d5c00-config-volume\") on node \"crc\" DevicePath \"\"" Jan 29 12:45:03 crc kubenswrapper[4753]: I0129 12:45:03.426146 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494845-lbqdr" event={"ID":"11b3cff0-3079-4e2f-8944-05f4441d5c00","Type":"ContainerDied","Data":"84ef54d62fb4f7247d0d26a34de22cf8b49557f055d042071f5abad395b670e8"} Jan 29 12:45:03 crc kubenswrapper[4753]: I0129 12:45:03.426193 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494845-lbqdr" Jan 29 12:45:03 crc kubenswrapper[4753]: I0129 12:45:03.426198 4753 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="84ef54d62fb4f7247d0d26a34de22cf8b49557f055d042071f5abad395b670e8" Jan 29 12:45:03 crc kubenswrapper[4753]: I0129 12:45:03.762463 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494800-86kw2"] Jan 29 12:45:03 crc kubenswrapper[4753]: I0129 12:45:03.767376 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494800-86kw2"] Jan 29 12:45:03 crc kubenswrapper[4753]: I0129 12:45:03.896273 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4f48974c-8787-482a-b962-f7646e12952e" path="/var/lib/kubelet/pods/4f48974c-8787-482a-b962-f7646e12952e/volumes" Jan 29 12:45:08 crc kubenswrapper[4753]: I0129 12:45:08.713709 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-6dx95/must-gather-8h8qb"] Jan 29 12:45:08 crc kubenswrapper[4753]: I0129 12:45:08.715315 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-6dx95/must-gather-8h8qb" podUID="e66361cf-7d08-49f4-9571-3450b52d2f56" containerName="copy" containerID="cri-o://312360de953fbe1c2c4681655fb80e978370d960b8473057aa55ae1d4ed06b31" gracePeriod=2 Jan 29 12:45:08 crc kubenswrapper[4753]: I0129 12:45:08.718959 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-6dx95/must-gather-8h8qb"] Jan 29 12:45:09 crc kubenswrapper[4753]: I0129 12:45:09.564152 4753 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-must-gather-6dx95_must-gather-8h8qb_e66361cf-7d08-49f4-9571-3450b52d2f56/copy/0.log" Jan 29 12:45:09 crc kubenswrapper[4753]: I0129 12:45:09.568607 4753 generic.go:334] "Generic (PLEG): container finished" podID="e66361cf-7d08-49f4-9571-3450b52d2f56" containerID="312360de953fbe1c2c4681655fb80e978370d960b8473057aa55ae1d4ed06b31" exitCode=143 Jan 29 12:45:09 crc kubenswrapper[4753]: I0129 12:45:09.833564 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-6dx95_must-gather-8h8qb_e66361cf-7d08-49f4-9571-3450b52d2f56/copy/0.log" Jan 29 12:45:09 crc kubenswrapper[4753]: I0129 12:45:09.834989 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-6dx95/must-gather-8h8qb" Jan 29 12:45:09 crc kubenswrapper[4753]: I0129 12:45:09.873387 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/e66361cf-7d08-49f4-9571-3450b52d2f56-must-gather-output\") pod \"e66361cf-7d08-49f4-9571-3450b52d2f56\" (UID: \"e66361cf-7d08-49f4-9571-3450b52d2f56\") " Jan 29 12:45:09 crc kubenswrapper[4753]: I0129 12:45:09.873483 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qzjwd\" (UniqueName: \"kubernetes.io/projected/e66361cf-7d08-49f4-9571-3450b52d2f56-kube-api-access-qzjwd\") pod \"e66361cf-7d08-49f4-9571-3450b52d2f56\" (UID: \"e66361cf-7d08-49f4-9571-3450b52d2f56\") " Jan 29 12:45:09 crc kubenswrapper[4753]: I0129 12:45:09.880016 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e66361cf-7d08-49f4-9571-3450b52d2f56-kube-api-access-qzjwd" (OuterVolumeSpecName: "kube-api-access-qzjwd") pod "e66361cf-7d08-49f4-9571-3450b52d2f56" (UID: "e66361cf-7d08-49f4-9571-3450b52d2f56"). InnerVolumeSpecName "kube-api-access-qzjwd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:45:09 crc kubenswrapper[4753]: I0129 12:45:09.956287 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e66361cf-7d08-49f4-9571-3450b52d2f56-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "e66361cf-7d08-49f4-9571-3450b52d2f56" (UID: "e66361cf-7d08-49f4-9571-3450b52d2f56"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:45:09 crc kubenswrapper[4753]: I0129 12:45:09.975838 4753 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/e66361cf-7d08-49f4-9571-3450b52d2f56-must-gather-output\") on node \"crc\" DevicePath \"\"" Jan 29 12:45:09 crc kubenswrapper[4753]: I0129 12:45:09.975874 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qzjwd\" (UniqueName: \"kubernetes.io/projected/e66361cf-7d08-49f4-9571-3450b52d2f56-kube-api-access-qzjwd\") on node \"crc\" DevicePath \"\"" Jan 29 12:45:10 crc kubenswrapper[4753]: I0129 12:45:10.577077 4753 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-6dx95_must-gather-8h8qb_e66361cf-7d08-49f4-9571-3450b52d2f56/copy/0.log" Jan 29 12:45:10 crc kubenswrapper[4753]: I0129 12:45:10.577596 4753 scope.go:117] "RemoveContainer" containerID="312360de953fbe1c2c4681655fb80e978370d960b8473057aa55ae1d4ed06b31" Jan 29 12:45:10 crc kubenswrapper[4753]: I0129 12:45:10.577608 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-6dx95/must-gather-8h8qb" Jan 29 12:45:10 crc kubenswrapper[4753]: I0129 12:45:10.607400 4753 scope.go:117] "RemoveContainer" containerID="1c1735a2355ec096e143310a1896f58b5c8c4311180ba57e66dbfd510f5b722c" Jan 29 12:45:11 crc kubenswrapper[4753]: I0129 12:45:11.901920 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e66361cf-7d08-49f4-9571-3450b52d2f56" path="/var/lib/kubelet/pods/e66361cf-7d08-49f4-9571-3450b52d2f56/volumes" Jan 29 12:45:14 crc kubenswrapper[4753]: I0129 12:45:14.526780 4753 scope.go:117] "RemoveContainer" containerID="c9612c284467c21e6e66f796daa879bac36be707c5a1e8ee913fb0adc13ec2b7" Jan 29 12:45:14 crc kubenswrapper[4753]: E0129 12:45:14.527033 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7c24x_openshift-machine-config-operator(b0310995-a7c7-47c3-ae6c-05daaaba92a6)\"" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" Jan 29 12:45:24 crc kubenswrapper[4753]: I0129 12:45:24.889585 4753 scope.go:117] "RemoveContainer" containerID="c9612c284467c21e6e66f796daa879bac36be707c5a1e8ee913fb0adc13ec2b7" Jan 29 12:45:24 crc kubenswrapper[4753]: E0129 12:45:24.890951 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7c24x_openshift-machine-config-operator(b0310995-a7c7-47c3-ae6c-05daaaba92a6)\"" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" Jan 29 12:45:33 crc kubenswrapper[4753]: I0129 12:45:33.946040 4753 scope.go:117] "RemoveContainer" containerID="622532cb2281d0d8e36ef13ffa7a1468db1e0cd54769f22c744d688794ab718f" Jan 29 12:45:38 crc kubenswrapper[4753]: I0129 12:45:38.889839 4753 scope.go:117] "RemoveContainer" containerID="c9612c284467c21e6e66f796daa879bac36be707c5a1e8ee913fb0adc13ec2b7" Jan 29 12:45:38 crc kubenswrapper[4753]: E0129 12:45:38.890463 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7c24x_openshift-machine-config-operator(b0310995-a7c7-47c3-ae6c-05daaaba92a6)\"" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" Jan 29 12:45:50 crc kubenswrapper[4753]: I0129 12:45:50.889160 4753 scope.go:117] "RemoveContainer" containerID="c9612c284467c21e6e66f796daa879bac36be707c5a1e8ee913fb0adc13ec2b7" Jan 29 12:45:50 crc kubenswrapper[4753]: E0129 12:45:50.890086 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7c24x_openshift-machine-config-operator(b0310995-a7c7-47c3-ae6c-05daaaba92a6)\"" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" Jan 29 12:46:04 crc kubenswrapper[4753]: I0129 12:46:04.888832 4753 scope.go:117] "RemoveContainer" containerID="c9612c284467c21e6e66f796daa879bac36be707c5a1e8ee913fb0adc13ec2b7" 
Jan 29 12:46:04 crc kubenswrapper[4753]: E0129 12:46:04.889867 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7c24x_openshift-machine-config-operator(b0310995-a7c7-47c3-ae6c-05daaaba92a6)\"" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" Jan 29 12:46:15 crc kubenswrapper[4753]: I0129 12:46:15.890807 4753 scope.go:117] "RemoveContainer" containerID="c9612c284467c21e6e66f796daa879bac36be707c5a1e8ee913fb0adc13ec2b7" Jan 29 12:46:15 crc kubenswrapper[4753]: E0129 12:46:15.891772 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7c24x_openshift-machine-config-operator(b0310995-a7c7-47c3-ae6c-05daaaba92a6)\"" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" Jan 29 12:46:28 crc kubenswrapper[4753]: I0129 12:46:28.888772 4753 scope.go:117] "RemoveContainer" containerID="c9612c284467c21e6e66f796daa879bac36be707c5a1e8ee913fb0adc13ec2b7" Jan 29 12:46:28 crc kubenswrapper[4753]: E0129 12:46:28.889548 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7c24x_openshift-machine-config-operator(b0310995-a7c7-47c3-ae6c-05daaaba92a6)\"" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" Jan 29 12:46:39 crc kubenswrapper[4753]: I0129 12:46:39.888555 4753 scope.go:117] "RemoveContainer" containerID="c9612c284467c21e6e66f796daa879bac36be707c5a1e8ee913fb0adc13ec2b7" Jan 29 12:46:39 crc kubenswrapper[4753]: E0129 12:46:39.889838 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7c24x_openshift-machine-config-operator(b0310995-a7c7-47c3-ae6c-05daaaba92a6)\"" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" Jan 29 12:46:50 crc kubenswrapper[4753]: I0129 12:46:50.923545 4753 scope.go:117] "RemoveContainer" containerID="c9612c284467c21e6e66f796daa879bac36be707c5a1e8ee913fb0adc13ec2b7" Jan 29 12:46:50 crc kubenswrapper[4753]: E0129 12:46:50.924851 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7c24x_openshift-machine-config-operator(b0310995-a7c7-47c3-ae6c-05daaaba92a6)\"" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" Jan 29 12:47:05 crc kubenswrapper[4753]: I0129 12:47:05.889133 4753 scope.go:117] "RemoveContainer" containerID="c9612c284467c21e6e66f796daa879bac36be707c5a1e8ee913fb0adc13ec2b7" Jan 29 12:47:05 crc kubenswrapper[4753]: E0129 12:47:05.889811 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7c24x_openshift-machine-config-operator(b0310995-a7c7-47c3-ae6c-05daaaba92a6)\"" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" Jan 29 12:47:18 crc kubenswrapper[4753]: I0129 12:47:18.889109 4753 scope.go:117] "RemoveContainer" containerID="c9612c284467c21e6e66f796daa879bac36be707c5a1e8ee913fb0adc13ec2b7" Jan 29 12:47:18 crc kubenswrapper[4753]: E0129 12:47:18.890471 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7c24x_openshift-machine-config-operator(b0310995-a7c7-47c3-ae6c-05daaaba92a6)\"" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" Jan 29 12:47:29 crc kubenswrapper[4753]: I0129 12:47:29.888763 4753 scope.go:117] "RemoveContainer" containerID="c9612c284467c21e6e66f796daa879bac36be707c5a1e8ee913fb0adc13ec2b7" Jan 29 12:47:29 crc kubenswrapper[4753]: E0129 12:47:29.889416 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7c24x_openshift-machine-config-operator(b0310995-a7c7-47c3-ae6c-05daaaba92a6)\"" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" Jan 29 12:47:40 crc kubenswrapper[4753]: I0129 12:47:40.889814 4753 scope.go:117] "RemoveContainer" containerID="c9612c284467c21e6e66f796daa879bac36be707c5a1e8ee913fb0adc13ec2b7" Jan 29 12:47:40 crc kubenswrapper[4753]: E0129 12:47:40.890881 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7c24x_openshift-machine-config-operator(b0310995-a7c7-47c3-ae6c-05daaaba92a6)\"" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" Jan 29 12:47:51 crc kubenswrapper[4753]: I0129 12:47:51.961196 4753 scope.go:117] "RemoveContainer" containerID="c9612c284467c21e6e66f796daa879bac36be707c5a1e8ee913fb0adc13ec2b7" Jan 29 12:47:51 crc kubenswrapper[4753]: E0129 12:47:51.963111 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7c24x_openshift-machine-config-operator(b0310995-a7c7-47c3-ae6c-05daaaba92a6)\"" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" Jan 29 12:48:05 crc kubenswrapper[4753]: I0129 12:48:05.888889 4753 scope.go:117] "RemoveContainer" containerID="c9612c284467c21e6e66f796daa879bac36be707c5a1e8ee913fb0adc13ec2b7" Jan 29 12:48:05 crc kubenswrapper[4753]: E0129 12:48:05.889750 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7c24x_openshift-machine-config-operator(b0310995-a7c7-47c3-ae6c-05daaaba92a6)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" Jan 29 12:48:08 crc kubenswrapper[4753]: I0129 12:48:08.973998 4753 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-hrs4g"] Jan 29 12:48:08 crc kubenswrapper[4753]: E0129 12:48:08.974901 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e66361cf-7d08-49f4-9571-3450b52d2f56" containerName="gather" Jan 29 12:48:08 crc kubenswrapper[4753]: I0129 12:48:08.974948 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="e66361cf-7d08-49f4-9571-3450b52d2f56" containerName="gather" Jan 29 12:48:08 crc kubenswrapper[4753]: E0129 12:48:08.975014 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="11b3cff0-3079-4e2f-8944-05f4441d5c00" containerName="collect-profiles" Jan 29 12:48:08 crc kubenswrapper[4753]: I0129 12:48:08.975021 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="11b3cff0-3079-4e2f-8944-05f4441d5c00" containerName="collect-profiles" Jan 29 12:48:08 crc kubenswrapper[4753]: E0129 12:48:08.975034 4753 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e66361cf-7d08-49f4-9571-3450b52d2f56" containerName="copy" Jan 29 12:48:08 crc kubenswrapper[4753]: I0129 12:48:08.975040 4753 state_mem.go:107] "Deleted CPUSet assignment" podUID="e66361cf-7d08-49f4-9571-3450b52d2f56" containerName="copy" Jan 29 12:48:08 crc kubenswrapper[4753]: I0129 12:48:08.975182 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="11b3cff0-3079-4e2f-8944-05f4441d5c00" containerName="collect-profiles" Jan 29 12:48:08 crc kubenswrapper[4753]: I0129 12:48:08.975206 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="e66361cf-7d08-49f4-9571-3450b52d2f56" containerName="copy" Jan 29 12:48:08 crc kubenswrapper[4753]: I0129 12:48:08.975216 4753 memory_manager.go:354] "RemoveStaleState removing state" podUID="e66361cf-7d08-49f4-9571-3450b52d2f56" containerName="gather" Jan 29 12:48:08 crc kubenswrapper[4753]: I0129 12:48:08.979032 4753 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hrs4g" Jan 29 12:48:09 crc kubenswrapper[4753]: I0129 12:48:09.069874 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-66xck\" (UniqueName: \"kubernetes.io/projected/1e66a91f-d23d-433c-9d3b-fe19676b37d8-kube-api-access-66xck\") pod \"redhat-marketplace-hrs4g\" (UID: \"1e66a91f-d23d-433c-9d3b-fe19676b37d8\") " pod="openshift-marketplace/redhat-marketplace-hrs4g" Jan 29 12:48:09 crc kubenswrapper[4753]: I0129 12:48:09.070425 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1e66a91f-d23d-433c-9d3b-fe19676b37d8-utilities\") pod \"redhat-marketplace-hrs4g\" (UID: \"1e66a91f-d23d-433c-9d3b-fe19676b37d8\") " pod="openshift-marketplace/redhat-marketplace-hrs4g" Jan 29 12:48:09 crc kubenswrapper[4753]: I0129 12:48:09.070457 4753 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1e66a91f-d23d-433c-9d3b-fe19676b37d8-catalog-content\") pod \"redhat-marketplace-hrs4g\" (UID: \"1e66a91f-d23d-433c-9d3b-fe19676b37d8\") " pod="openshift-marketplace/redhat-marketplace-hrs4g" Jan 29 12:48:09 crc kubenswrapper[4753]: I0129 12:48:09.073493 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-hrs4g"] Jan 29 12:48:09 crc kubenswrapper[4753]: I0129 12:48:09.171742 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-66xck\" (UniqueName: \"kubernetes.io/projected/1e66a91f-d23d-433c-9d3b-fe19676b37d8-kube-api-access-66xck\") pod \"redhat-marketplace-hrs4g\" (UID: \"1e66a91f-d23d-433c-9d3b-fe19676b37d8\") " pod="openshift-marketplace/redhat-marketplace-hrs4g" Jan 29 12:48:09 crc kubenswrapper[4753]: I0129 12:48:09.171928 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1e66a91f-d23d-433c-9d3b-fe19676b37d8-utilities\") pod \"redhat-marketplace-hrs4g\" (UID: \"1e66a91f-d23d-433c-9d3b-fe19676b37d8\") " pod="openshift-marketplace/redhat-marketplace-hrs4g" Jan 29 12:48:09 crc kubenswrapper[4753]: I0129 12:48:09.171947 4753 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1e66a91f-d23d-433c-9d3b-fe19676b37d8-catalog-content\") pod \"redhat-marketplace-hrs4g\" (UID: \"1e66a91f-d23d-433c-9d3b-fe19676b37d8\") " pod="openshift-marketplace/redhat-marketplace-hrs4g" Jan 29 12:48:09 crc kubenswrapper[4753]: I0129 12:48:09.172787 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1e66a91f-d23d-433c-9d3b-fe19676b37d8-utilities\") pod \"redhat-marketplace-hrs4g\" (UID: \"1e66a91f-d23d-433c-9d3b-fe19676b37d8\") " pod="openshift-marketplace/redhat-marketplace-hrs4g" Jan 29 12:48:09 crc kubenswrapper[4753]: I0129 12:48:09.172810 4753 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1e66a91f-d23d-433c-9d3b-fe19676b37d8-catalog-content\") pod \"redhat-marketplace-hrs4g\" (UID: \"1e66a91f-d23d-433c-9d3b-fe19676b37d8\") " pod="openshift-marketplace/redhat-marketplace-hrs4g" Jan 29 12:48:09 crc kubenswrapper[4753]: I0129 12:48:09.193903 4753 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-66xck\" (UniqueName: \"kubernetes.io/projected/1e66a91f-d23d-433c-9d3b-fe19676b37d8-kube-api-access-66xck\") pod \"redhat-marketplace-hrs4g\" (UID: \"1e66a91f-d23d-433c-9d3b-fe19676b37d8\") " pod="openshift-marketplace/redhat-marketplace-hrs4g" Jan 29 12:48:09 crc kubenswrapper[4753]: I0129 12:48:09.376266 4753 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hrs4g" Jan 29 12:48:09 crc kubenswrapper[4753]: I0129 12:48:09.651341 4753 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-hrs4g"] Jan 29 12:48:10 crc kubenswrapper[4753]: I0129 12:48:10.132638 4753 generic.go:334] "Generic (PLEG): container finished" podID="1e66a91f-d23d-433c-9d3b-fe19676b37d8" containerID="3a0947225a7d11e9de7cbabd2039311246e8645c5bfa5d69a87b0110e677433b" exitCode=0 Jan 29 12:48:10 crc kubenswrapper[4753]: I0129 12:48:10.132689 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hrs4g" event={"ID":"1e66a91f-d23d-433c-9d3b-fe19676b37d8","Type":"ContainerDied","Data":"3a0947225a7d11e9de7cbabd2039311246e8645c5bfa5d69a87b0110e677433b"} Jan 29 12:48:10 crc kubenswrapper[4753]: I0129 12:48:10.132725 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hrs4g" event={"ID":"1e66a91f-d23d-433c-9d3b-fe19676b37d8","Type":"ContainerStarted","Data":"267e1b15854cf2b6a12b374be496559bff96adb1c7e06c736ec3e6a0fbd101a9"} Jan 29 12:48:12 crc kubenswrapper[4753]: I0129 12:48:12.148770 4753 generic.go:334] "Generic (PLEG): container finished" podID="1e66a91f-d23d-433c-9d3b-fe19676b37d8" containerID="52ae7e90613a4c556d5de57332c8cc0cf68bb24e22ce292aff3270db90f3e471" exitCode=0 Jan 29 12:48:12 crc kubenswrapper[4753]: I0129 12:48:12.148874 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hrs4g" event={"ID":"1e66a91f-d23d-433c-9d3b-fe19676b37d8","Type":"ContainerDied","Data":"52ae7e90613a4c556d5de57332c8cc0cf68bb24e22ce292aff3270db90f3e471"} Jan 29 12:48:14 crc kubenswrapper[4753]: I0129 12:48:14.162642 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hrs4g" event={"ID":"1e66a91f-d23d-433c-9d3b-fe19676b37d8","Type":"ContainerStarted","Data":"048c0ebd415afc4a14f1b4f6804fa76a2592e8758fca4d7e252655fd6512aa27"} Jan 29 12:48:14 crc kubenswrapper[4753]: I0129 12:48:14.188740 4753 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-hrs4g" podStartSLOduration=3.164058979 podStartE2EDuration="6.188687478s" podCreationTimestamp="2026-01-29 12:48:08 +0000 UTC" firstStartedPulling="2026-01-29 12:48:10.134273218 +0000 UTC m=+2504.386354693" lastFinishedPulling="2026-01-29 12:48:13.158901737 +0000 UTC m=+2507.410983192" observedRunningTime="2026-01-29 12:48:14.181308336 +0000 UTC m=+2508.433389811" watchObservedRunningTime="2026-01-29 12:48:14.188687478 +0000 UTC m=+2508.440768933" Jan 29 12:48:16 crc kubenswrapper[4753]: I0129 12:48:16.888676 4753 scope.go:117] "RemoveContainer" containerID="c9612c284467c21e6e66f796daa879bac36be707c5a1e8ee913fb0adc13ec2b7" Jan 29 12:48:16 crc kubenswrapper[4753]: E0129 12:48:16.889171 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-7c24x_openshift-machine-config-operator(b0310995-a7c7-47c3-ae6c-05daaaba92a6)\"" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" Jan 29 12:48:19 crc kubenswrapper[4753]: I0129 12:48:19.377197 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-hrs4g" Jan 29 12:48:19 crc kubenswrapper[4753]: I0129 12:48:19.377602 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-hrs4g" Jan 29 12:48:19 crc kubenswrapper[4753]: I0129 12:48:19.424785 4753 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-hrs4g" Jan 29 12:48:20 crc kubenswrapper[4753]: I0129 12:48:20.243415 4753 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-hrs4g" Jan 29 12:48:20 crc kubenswrapper[4753]: I0129 12:48:20.293739 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-hrs4g"] Jan 29 12:48:22 crc kubenswrapper[4753]: I0129 12:48:22.211003 4753 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-hrs4g" podUID="1e66a91f-d23d-433c-9d3b-fe19676b37d8" containerName="registry-server" containerID="cri-o://048c0ebd415afc4a14f1b4f6804fa76a2592e8758fca4d7e252655fd6512aa27" gracePeriod=2 Jan 29 12:48:23 crc kubenswrapper[4753]: I0129 12:48:23.218962 4753 generic.go:334] "Generic (PLEG): container finished" podID="1e66a91f-d23d-433c-9d3b-fe19676b37d8" containerID="048c0ebd415afc4a14f1b4f6804fa76a2592e8758fca4d7e252655fd6512aa27" exitCode=0 Jan 29 12:48:23 crc kubenswrapper[4753]: I0129 12:48:23.219342 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hrs4g" event={"ID":"1e66a91f-d23d-433c-9d3b-fe19676b37d8","Type":"ContainerDied","Data":"048c0ebd415afc4a14f1b4f6804fa76a2592e8758fca4d7e252655fd6512aa27"} Jan 29 12:48:23 crc kubenswrapper[4753]: I0129 12:48:23.778440 4753 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hrs4g" Jan 29 12:48:23 crc kubenswrapper[4753]: I0129 12:48:23.942848 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1e66a91f-d23d-433c-9d3b-fe19676b37d8-catalog-content\") pod \"1e66a91f-d23d-433c-9d3b-fe19676b37d8\" (UID: \"1e66a91f-d23d-433c-9d3b-fe19676b37d8\") " Jan 29 12:48:23 crc kubenswrapper[4753]: I0129 12:48:23.942899 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1e66a91f-d23d-433c-9d3b-fe19676b37d8-utilities\") pod \"1e66a91f-d23d-433c-9d3b-fe19676b37d8\" (UID: \"1e66a91f-d23d-433c-9d3b-fe19676b37d8\") " Jan 29 12:48:23 crc kubenswrapper[4753]: I0129 12:48:23.943142 4753 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-66xck\" (UniqueName: \"kubernetes.io/projected/1e66a91f-d23d-433c-9d3b-fe19676b37d8-kube-api-access-66xck\") pod \"1e66a91f-d23d-433c-9d3b-fe19676b37d8\" (UID: \"1e66a91f-d23d-433c-9d3b-fe19676b37d8\") " Jan 29 12:48:23 crc kubenswrapper[4753]: I0129 12:48:23.946897 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1e66a91f-d23d-433c-9d3b-fe19676b37d8-utilities" (OuterVolumeSpecName: "utilities") pod "1e66a91f-d23d-433c-9d3b-fe19676b37d8" (UID: "1e66a91f-d23d-433c-9d3b-fe19676b37d8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:48:23 crc kubenswrapper[4753]: I0129 12:48:23.949507 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1e66a91f-d23d-433c-9d3b-fe19676b37d8-kube-api-access-66xck" (OuterVolumeSpecName: "kube-api-access-66xck") pod "1e66a91f-d23d-433c-9d3b-fe19676b37d8" (UID: "1e66a91f-d23d-433c-9d3b-fe19676b37d8"). InnerVolumeSpecName "kube-api-access-66xck". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 12:48:24 crc kubenswrapper[4753]: I0129 12:48:24.019124 4753 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1e66a91f-d23d-433c-9d3b-fe19676b37d8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1e66a91f-d23d-433c-9d3b-fe19676b37d8" (UID: "1e66a91f-d23d-433c-9d3b-fe19676b37d8"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 12:48:24 crc kubenswrapper[4753]: I0129 12:48:24.044769 4753 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-66xck\" (UniqueName: \"kubernetes.io/projected/1e66a91f-d23d-433c-9d3b-fe19676b37d8-kube-api-access-66xck\") on node \"crc\" DevicePath \"\"" Jan 29 12:48:24 crc kubenswrapper[4753]: I0129 12:48:24.044823 4753 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1e66a91f-d23d-433c-9d3b-fe19676b37d8-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 12:48:24 crc kubenswrapper[4753]: I0129 12:48:24.044836 4753 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1e66a91f-d23d-433c-9d3b-fe19676b37d8-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 12:48:24 crc kubenswrapper[4753]: I0129 12:48:24.229164 4753 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hrs4g" event={"ID":"1e66a91f-d23d-433c-9d3b-fe19676b37d8","Type":"ContainerDied","Data":"267e1b15854cf2b6a12b374be496559bff96adb1c7e06c736ec3e6a0fbd101a9"} Jan 29 12:48:24 crc kubenswrapper[4753]: I0129 12:48:24.229275 4753 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hrs4g" Jan 29 12:48:24 crc kubenswrapper[4753]: I0129 12:48:24.229305 4753 scope.go:117] "RemoveContainer" containerID="048c0ebd415afc4a14f1b4f6804fa76a2592e8758fca4d7e252655fd6512aa27" Jan 29 12:48:24 crc kubenswrapper[4753]: I0129 12:48:24.252309 4753 scope.go:117] "RemoveContainer" containerID="52ae7e90613a4c556d5de57332c8cc0cf68bb24e22ce292aff3270db90f3e471" Jan 29 12:48:24 crc kubenswrapper[4753]: I0129 12:48:24.276256 4753 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-hrs4g"] Jan 29 12:48:24 crc kubenswrapper[4753]: I0129 12:48:24.283265 4753 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-hrs4g"] Jan 29 12:48:24 crc kubenswrapper[4753]: I0129 12:48:24.283668 4753 scope.go:117] "RemoveContainer" containerID="3a0947225a7d11e9de7cbabd2039311246e8645c5bfa5d69a87b0110e677433b" Jan 29 12:48:25 crc kubenswrapper[4753]: I0129 12:48:25.896173 4753 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1e66a91f-d23d-433c-9d3b-fe19676b37d8" path="/var/lib/kubelet/pods/1e66a91f-d23d-433c-9d3b-fe19676b37d8/volumes" Jan 29 12:48:29 crc kubenswrapper[4753]: I0129 12:48:29.888278 4753 scope.go:117] "RemoveContainer" containerID="c9612c284467c21e6e66f796daa879bac36be707c5a1e8ee913fb0adc13ec2b7" Jan 29 12:48:29 crc kubenswrapper[4753]: E0129 12:48:29.888851 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7c24x_openshift-machine-config-operator(b0310995-a7c7-47c3-ae6c-05daaaba92a6)\"" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" Jan 29 12:48:42 crc kubenswrapper[4753]: I0129 12:48:42.889150 4753 scope.go:117] "RemoveContainer" containerID="c9612c284467c21e6e66f796daa879bac36be707c5a1e8ee913fb0adc13ec2b7" Jan 29 12:48:42 crc kubenswrapper[4753]: E0129 12:48:42.889903 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7c24x_openshift-machine-config-operator(b0310995-a7c7-47c3-ae6c-05daaaba92a6)\"" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" Jan 29 12:48:54 crc kubenswrapper[4753]: I0129 12:48:54.082112 4753 scope.go:117] "RemoveContainer" containerID="c9612c284467c21e6e66f796daa879bac36be707c5a1e8ee913fb0adc13ec2b7" Jan 29 12:48:54 crc kubenswrapper[4753]: E0129 12:48:54.082934 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7c24x_openshift-machine-config-operator(b0310995-a7c7-47c3-ae6c-05daaaba92a6)\"" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" Jan 29 12:49:05 crc kubenswrapper[4753]: I0129 12:49:05.888328 4753 scope.go:117] "RemoveContainer" containerID="c9612c284467c21e6e66f796daa879bac36be707c5a1e8ee913fb0adc13ec2b7" Jan 29 12:49:05 crc kubenswrapper[4753]: E0129 12:49:05.889036 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7c24x_openshift-machine-config-operator(b0310995-a7c7-47c3-ae6c-05daaaba92a6)\"" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" Jan 29 12:49:20 crc kubenswrapper[4753]: I0129 12:49:20.889557 4753 scope.go:117] "RemoveContainer" containerID="c9612c284467c21e6e66f796daa879bac36be707c5a1e8ee913fb0adc13ec2b7" Jan 29 12:49:20 crc kubenswrapper[4753]: E0129 12:49:20.890205 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7c24x_openshift-machine-config-operator(b0310995-a7c7-47c3-ae6c-05daaaba92a6)\"" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" Jan 29 12:49:34 crc kubenswrapper[4753]: I0129 12:49:34.889309 4753 scope.go:117] "RemoveContainer" containerID="c9612c284467c21e6e66f796daa879bac36be707c5a1e8ee913fb0adc13ec2b7" Jan 29 12:49:34 crc kubenswrapper[4753]: E0129 12:49:34.890189 4753 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-7c24x_openshift-machine-config-operator(b0310995-a7c7-47c3-ae6c-05daaaba92a6)\"" pod="openshift-machine-config-operator/machine-config-daemon-7c24x" podUID="b0310995-a7c7-47c3-ae6c-05daaaba92a6" var/home/core/zuul-output/logs/crc-cloud-workdir-crc-all-logs.tar.gz0000644000175000000000000000005515136653551024457 0ustar coreroot  Om77'(var/home/core/zuul-output/logs/crc-cloud/0000755000175000000000000000000015136653552017375 5ustar corerootvar/home/core/zuul-output/artifacts/0000755000175000017500000000000015136646173016521 5ustar corecorevar/home/core/zuul-output/docs/0000755000175000017500000000000015136646173015471 5ustar corecore